-rw-r--r-- Documentation/memory-barriers.txt | 348
-rw-r--r-- Documentation/networking/README.ipw2200 | 10
-rw-r--r-- Documentation/networking/bonding.txt | 323
-rw-r--r-- Documentation/serial/driver | 9
-rw-r--r-- MAINTAINERS | 23
-rw-r--r-- Makefile | 4
-rw-r--r-- arch/alpha/Kconfig | 2
-rw-r--r-- arch/alpha/kernel/alpha_ksyms.c | 1
-rw-r--r-- arch/alpha/kernel/process.c | 6
-rw-r--r-- arch/alpha/kernel/smp.c | 14
-rw-r--r-- arch/alpha/kernel/sys_titan.c | 2
-rw-r--r-- arch/arm/Kconfig.debug | 2
-rw-r--r-- arch/arm/mach-ep93xx/ts72xx.c | 8
-rw-r--r-- arch/arm/mach-imx/irq.c | 2
-rw-r--r-- arch/arm/mach-integrator/integrator_cp.c | 5
-rw-r--r-- arch/arm/mach-ixp23xx/core.c | 18
-rw-r--r-- arch/arm/mach-ixp4xx/Kconfig | 2
-rw-r--r-- arch/arm/mach-pxa/mainstone.c | 1
-rw-r--r-- arch/arm/mach-pxa/spitz.c | 1
-rw-r--r-- arch/arm/mach-s3c2410/Kconfig | 2
-rw-r--r-- arch/arm/mach-sa1100/neponset.c | 8
-rw-r--r-- arch/arm/mach-versatile/core.c | 5
-rw-r--r-- arch/arm/mm/mm-armv.c | 4
-rw-r--r-- arch/arm/mm/proc-xsc3.S | 3
-rw-r--r-- arch/i386/kernel/acpi/boot.c | 8
-rw-r--r-- arch/i386/kernel/acpi/earlyquirk.c | 23
-rw-r--r-- arch/i386/kernel/setup.c | 11
-rw-r--r-- arch/i386/mach-generic/probe.c | 16
-rw-r--r-- arch/mips/Kconfig | 96
-rw-r--r-- arch/mips/au1000/common/irq.c | 1
-rw-r--r-- arch/mips/au1000/common/prom.c | 24
-rw-r--r-- arch/mips/au1000/common/sleeper.S | 5
-rw-r--r-- arch/mips/au1000/common/time.c | 1
-rw-r--r-- arch/mips/ddb5xxx/ddb5476/dbg_io.c | 2
-rw-r--r-- arch/mips/ddb5xxx/ddb5477/kgdb_io.c | 2
-rw-r--r-- arch/mips/gt64120/ev64120/serialGT.c | 2
-rw-r--r-- arch/mips/gt64120/momenco_ocelot/dbg_io.c | 2
-rw-r--r-- arch/mips/ite-boards/generic/dbg_io.c | 2
-rw-r--r-- arch/mips/kernel/asm-offsets.c | 4
-rw-r--r-- arch/mips/kernel/cpu-bugs64.c | 8
-rw-r--r-- arch/mips/kernel/cpu-probe.c | 15
-rw-r--r-- arch/mips/kernel/entry.S | 2
-rw-r--r-- arch/mips/kernel/gdb-low.S | 8
-rw-r--r-- arch/mips/kernel/module.c | 6
-rw-r--r-- arch/mips/kernel/proc.c | 2
-rw-r--r-- arch/mips/kernel/scall64-o32.S | 2
-rw-r--r-- arch/mips/kernel/setup.c | 18
-rw-r--r-- arch/mips/kernel/signal-common.h | 30
-rw-r--r-- arch/mips/kernel/smp.c | 5
-rw-r--r-- arch/mips/kernel/syscall.c | 27
-rw-r--r-- arch/mips/kernel/traps.c | 20
-rw-r--r-- arch/mips/kernel/vmlinux.lds.S | 20
-rw-r--r-- arch/mips/math-emu/dp_fint.c | 4
-rw-r--r-- arch/mips/math-emu/dp_flong.c | 4
-rw-r--r-- arch/mips/math-emu/sp_fint.c | 4
-rw-r--r-- arch/mips/math-emu/sp_flong.c | 4
-rw-r--r-- arch/mips/mm/c-r4k.c | 78
-rw-r--r-- arch/mips/mm/init.c | 2
-rw-r--r-- arch/mips/mm/pg-r4k.c | 1
-rw-r--r-- arch/mips/mm/tlbex.c | 2
-rw-r--r-- arch/mips/momentum/jaguar_atx/dbg_io.c | 2
-rw-r--r-- arch/mips/momentum/ocelot_c/dbg_io.c | 2
-rw-r--r-- arch/mips/momentum/ocelot_g/dbg_io.c | 2
-rw-r--r-- arch/mips/oprofile/common.c | 9
-rw-r--r-- arch/mips/oprofile/op_model_mipsxx.c | 34
-rw-r--r-- arch/mips/oprofile/op_model_rm9000.c | 2
-rw-r--r-- arch/mips/sgi-ip32/ip32-irq.c | 4
-rw-r--r-- arch/powerpc/kernel/prom_init.c | 10
-rw-r--r-- arch/powerpc/kernel/signal_32.c | 11
-rw-r--r-- arch/powerpc/kernel/signal_64.c | 2
-rw-r--r-- arch/powerpc/mm/hash_native_64.c | 4
-rw-r--r-- arch/powerpc/platforms/cell/setup.c | 11
-rw-r--r-- arch/powerpc/platforms/powermac/low_i2c.c | 12
-rw-r--r-- arch/powerpc/platforms/powermac/pfunc_core.c | 18
-rw-r--r-- arch/powerpc/platforms/pseries/setup.c | 8
-rw-r--r-- arch/sparc/kernel/smp.c | 11
-rw-r--r-- arch/sparc64/kernel/head.S | 30
-rw-r--r-- arch/sparc64/kernel/pci_sun4v.c | 124
-rw-r--r-- arch/sparc64/kernel/setup.c | 23
-rw-r--r-- arch/sparc64/kernel/smp.c | 51
-rw-r--r-- arch/sparc64/kernel/sparc64_ksyms.c | 1
-rw-r--r-- arch/sparc64/kernel/traps.c | 11
-rw-r--r-- arch/sparc64/lib/checksum.S | 5
-rw-r--r-- arch/sparc64/lib/csum_copy.S | 5
-rw-r--r-- arch/um/Makefile-i386 | 4
-rw-r--r-- arch/um/include/kern_util.h | 13
-rw-r--r-- arch/um/kernel/time_kern.c | 10
-rw-r--r-- arch/um/os-Linux/main.c | 2
-rw-r--r-- arch/um/os-Linux/time.c | 10
-rw-r--r-- arch/um/sys-i386/syscalls.c | 9
-rw-r--r-- arch/um/sys-x86_64/signal.c | 24
-rw-r--r-- arch/um/sys-x86_64/syscalls.c | 2
-rw-r--r-- arch/x86_64/ia32/ia32_binfmt.c | 4
-rw-r--r-- arch/x86_64/kernel/e820.c | 2
-rw-r--r-- arch/x86_64/kernel/entry.S | 7
-rw-r--r-- arch/x86_64/kernel/io_apic.c | 30
-rw-r--r-- arch/x86_64/kernel/pci-dma.c | 4
-rw-r--r-- arch/x86_64/kernel/pci-gart.c | 6
-rw-r--r-- arch/x86_64/kernel/pmtimer.c | 2
-rw-r--r-- arch/x86_64/kernel/setup.c | 2
-rw-r--r-- arch/x86_64/mm/srat.c | 4
-rw-r--r-- block/as-iosched.c | 13
-rw-r--r-- block/cfq-iosched.c | 98
-rw-r--r-- block/deadline-iosched.c | 13
-rw-r--r-- block/elevator.c | 55
-rw-r--r-- block/noop-iosched.c | 7
-rw-r--r-- drivers/acpi/pci_link.c | 3
-rw-r--r-- drivers/acpi/processor_perflib.c | 5
-rw-r--r-- drivers/base/power/suspend.c | 5
-rw-r--r-- drivers/cdrom/cdrom.c | 6
-rw-r--r-- drivers/char/Makefile | 2
-rw-r--r-- drivers/char/ipmi/ipmi_si_intf.c | 38
-rw-r--r-- drivers/char/n_tty.c | 4
-rw-r--r-- drivers/char/pcmcia/cm4000_cs.c | 2
-rw-r--r-- drivers/char/tpm/tpm_bios.c | 89
-rw-r--r-- drivers/char/vt.c | 8
-rw-r--r-- drivers/ide/pci/sgiioc4.c | 16
-rw-r--r-- drivers/ieee1394/sbp2.c | 2
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 1
-rw-r--r-- drivers/md/md.c | 11
-rw-r--r-- drivers/message/fusion/mptbase.c | 27
-rw-r--r-- drivers/message/fusion/mptspi.c | 2
-rw-r--r-- drivers/message/i2o/exec-osm.c | 72
-rw-r--r-- drivers/message/i2o/iop.c | 4
-rw-r--r-- drivers/mmc/Kconfig | 2
-rw-r--r-- drivers/net/3c501.c | 2
-rw-r--r-- drivers/net/3c503.c | 2
-rw-r--r-- drivers/net/3c505.c | 2
-rw-r--r-- drivers/net/3c507.c | 2
-rw-r--r-- drivers/net/3c523.c | 2
-rw-r--r-- drivers/net/3c527.c | 2
-rw-r--r-- drivers/net/8139cp.c | 14
-rw-r--r-- drivers/net/8139too.c | 14
-rw-r--r-- drivers/net/Kconfig | 34
-rw-r--r-- drivers/net/Makefile | 2
-rw-r--r-- drivers/net/au1000_eth.c | 1802
-rw-r--r-- drivers/net/au1000_eth.h | 134
-rw-r--r-- drivers/net/cassini.c | 9
-rw-r--r-- drivers/net/e100.c | 75
-rw-r--r-- drivers/net/e1000/Makefile | 3
-rw-r--r-- drivers/net/e1000/e1000.h | 6
-rw-r--r-- drivers/net/e1000/e1000_ethtool.c | 46
-rw-r--r-- drivers/net/e1000/e1000_hw.c | 115
-rw-r--r-- drivers/net/e1000/e1000_hw.h | 7
-rw-r--r-- drivers/net/e1000/e1000_main.c | 393
-rw-r--r-- drivers/net/e1000/e1000_osdep.h | 3
-rw-r--r-- drivers/net/e1000/e1000_param.c | 3
-rw-r--r-- drivers/net/epic100.c | 56
-rw-r--r-- drivers/net/forcedeth.c | 1763
-rw-r--r-- drivers/net/hp-plus.c | 2
-rw-r--r-- drivers/net/hp.c | 4
-rw-r--r-- drivers/net/ibmlana.c | 20
-rw-r--r-- drivers/net/ibmlana.h | 6
-rw-r--r-- drivers/net/ibmveth.c | 291
-rw-r--r-- drivers/net/ibmveth.h | 11
-rw-r--r-- drivers/net/ixgb/Makefile | 2
-rw-r--r-- drivers/net/ixgb/ixgb.h | 12
-rw-r--r-- drivers/net/ixgb/ixgb_ee.c | 2
-rw-r--r-- drivers/net/ixgb/ixgb_ee.h | 2
-rw-r--r-- drivers/net/ixgb/ixgb_ethtool.c | 57
-rw-r--r-- drivers/net/ixgb/ixgb_hw.c | 2
-rw-r--r-- drivers/net/ixgb/ixgb_hw.h | 3
-rw-r--r-- drivers/net/ixgb/ixgb_ids.h | 6
-rw-r--r-- drivers/net/ixgb/ixgb_main.c | 304
-rw-r--r-- drivers/net/ixgb/ixgb_osdep.h | 2
-rw-r--r-- drivers/net/ixgb/ixgb_param.c | 26
-rw-r--r-- drivers/net/myri10ge/Makefile | 5
-rw-r--r-- drivers/net/myri10ge/myri10ge.c | 2869
-rw-r--r-- drivers/net/myri10ge/myri10ge_mcp.h | 205
-rw-r--r-- drivers/net/myri10ge/myri10ge_mcp_gen_header.h | 58
-rw-r--r-- drivers/net/ne.c | 2
-rw-r--r-- drivers/net/ne2.c | 2
-rw-r--r-- drivers/net/netconsole.c | 2
-rw-r--r-- drivers/net/pcmcia/nmclan_cs.c | 2
-rw-r--r-- drivers/net/pcmcia/pcnet_cs.c | 42
-rw-r--r-- drivers/net/phy/Kconfig | 6
-rw-r--r-- drivers/net/phy/Makefile | 1
-rw-r--r-- drivers/net/phy/smsc.c | 101
-rw-r--r-- drivers/net/pppoe.c | 3
-rw-r--r-- drivers/net/r8169.c | 1
-rw-r--r-- drivers/net/s2io-regs.h | 32
-rw-r--r-- drivers/net/s2io.c | 1476
-rw-r--r-- drivers/net/s2io.h | 59
-rw-r--r-- drivers/net/sis900.c | 26
-rw-r--r-- drivers/net/sis900.h | 10
-rw-r--r-- drivers/net/skge.c | 232
-rw-r--r-- drivers/net/skge.h | 6
-rw-r--r-- drivers/net/sky2.c | 53
-rw-r--r-- drivers/net/smc-ultra.c | 2
-rw-r--r-- drivers/net/smc-ultra32.c | 2
-rw-r--r-- drivers/net/smc911x.c | 2307
-rw-r--r-- drivers/net/smc911x.h | 835
-rw-r--r-- drivers/net/smc9194.c | 7
-rw-r--r-- drivers/net/smc91x.h | 18
-rw-r--r-- drivers/net/sungem_phy.c | 6
-rw-r--r-- drivers/net/tg3.c | 144
-rw-r--r-- drivers/net/tg3.h | 3
-rw-r--r-- drivers/net/tulip/de2104x.c | 58
-rw-r--r-- drivers/net/tulip/de4x5.c | 716
-rw-r--r-- drivers/net/tulip/de4x5.h | 14
-rw-r--r-- drivers/net/tulip/dmfe.c | 2
-rw-r--r-- drivers/net/tulip/eeprom.c | 8
-rw-r--r-- drivers/net/tulip/interrupt.c | 126
-rw-r--r-- drivers/net/tulip/media.c | 2
-rw-r--r-- drivers/net/tulip/tulip.h | 2
-rw-r--r-- drivers/net/tulip/tulip_core.c | 6
-rw-r--r-- drivers/net/tulip/uli526x.c | 80
-rw-r--r-- drivers/net/tulip/winbond-840.c | 26
-rw-r--r-- drivers/net/tulip/xircom_cb.c | 208
-rw-r--r-- drivers/net/via-velocity.h | 2
-rw-r--r-- drivers/net/wan/pci200syn.c | 27
-rw-r--r-- drivers/net/wireless/Kconfig | 47
-rw-r--r-- drivers/net/wireless/Makefile | 2
-rw-r--r-- drivers/net/wireless/airo.c | 271
-rw-r--r-- drivers/net/wireless/bcm43xx/bcm43xx.h | 1
-rw-r--r-- drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c | 2
-rw-r--r-- drivers/net/wireless/bcm43xx/bcm43xx_dma.c | 31
-rw-r--r-- drivers/net/wireless/bcm43xx/bcm43xx_main.c | 48
-rw-r--r-- drivers/net/wireless/hermes.c | 66
-rw-r--r-- drivers/net/wireless/hermes.h | 43
-rw-r--r-- drivers/net/wireless/hostap/hostap_80211_tx.c | 1
-rw-r--r-- drivers/net/wireless/hostap/hostap_ap.c | 11
-rw-r--r-- drivers/net/wireless/hostap/hostap_cs.c | 6
-rw-r--r-- drivers/net/wireless/hostap/hostap_main.c | 2
-rw-r--r-- drivers/net/wireless/ipw2200.c | 849
-rw-r--r-- drivers/net/wireless/ipw2200.h | 83
-rw-r--r-- drivers/net/wireless/orinoco.c | 251
-rw-r--r-- drivers/net/wireless/orinoco.h | 19
-rw-r--r-- drivers/net/wireless/orinoco_cs.c | 42
-rw-r--r-- drivers/net/wireless/orinoco_nortel.c | 171
-rw-r--r-- drivers/net/wireless/orinoco_pci.c | 210
-rw-r--r-- drivers/net/wireless/orinoco_pci.h | 104
-rw-r--r-- drivers/net/wireless/orinoco_plx.c | 223
-rw-r--r-- drivers/net/wireless/orinoco_tmd.c | 99
-rw-r--r-- drivers/net/wireless/spectrum_cs.c | 81
-rw-r--r-- drivers/net/wireless/zd1201.c (renamed from drivers/usb/net/zd1201.c) | 67
-rw-r--r-- drivers/net/wireless/zd1201.h (renamed from drivers/usb/net/zd1201.h) | 0
-rw-r--r-- drivers/pci/pci-driver.c | 13
-rw-r--r-- drivers/pci/pci.c | 21
-rw-r--r-- drivers/pcmcia/ds.c | 6
-rw-r--r-- drivers/rtc/rtc-m48t86.c | 72
-rw-r--r-- drivers/s390/cio/css.h | 4
-rw-r--r-- drivers/s390/cio/device_fsm.c | 2
-rw-r--r-- drivers/s390/net/Makefile | 3
-rw-r--r-- drivers/s390/net/ctcmain.c | 45
-rw-r--r-- drivers/s390/net/ctcmain.h | 12
-rw-r--r-- drivers/s390/net/ctctty.c | 1259
-rw-r--r-- drivers/s390/net/ctctty.h | 35
-rw-r--r-- drivers/scsi/libata-core.c | 1
-rw-r--r-- drivers/scsi/ppa.c | 7
-rw-r--r-- drivers/scsi/sata_mv.c | 3
-rw-r--r-- drivers/scsi/sata_sil24.c | 6
-rw-r--r-- drivers/scsi/scsi_devinfo.c | 1
-rw-r--r-- drivers/scsi/scsi_lib.c | 2
-rw-r--r-- drivers/scsi/scsi_transport_sas.c | 4
-rw-r--r-- drivers/usb/host/ohci-pxa27x.c | 3
-rw-r--r-- drivers/usb/net/Kconfig | 17
-rw-r--r-- drivers/usb/net/Makefile | 1
-rw-r--r-- drivers/video/au1100fb.c | 21
-rw-r--r-- drivers/video/console/fbcon.c | 4
-rw-r--r-- drivers/video/maxinefb.c | 4
-rw-r--r-- fs/bio.c | 5
-rw-r--r-- fs/debugfs/inode.c | 3
-rw-r--r-- fs/ext3/resize.c | 1
-rw-r--r-- fs/locks.c | 2
-rw-r--r-- fs/namei.c | 19
-rw-r--r-- include/asm-alpha/smp.h | 4
-rw-r--r-- include/asm-arm/arch-ixp23xx/memory.h | 2
-rw-r--r-- include/asm-arm/arch-l7200/serial_l7200.h | 2
-rw-r--r-- include/asm-arm/arch-l7200/uncompress.h | 2
-rw-r--r-- include/asm-arm/arch-pxa/ohci.h | 2
-rw-r--r-- include/asm-arm/system.h | 6
-rw-r--r-- include/asm-generic/pgtable.h | 11
-rw-r--r-- include/asm-mips/addrspace.h | 1
-rw-r--r-- include/asm-mips/cpu.h | 6
-rw-r--r-- include/asm-mips/delay.h | 22
-rw-r--r-- include/asm-mips/futex.h | 141
-rw-r--r-- include/asm-mips/inst.h | 33
-rw-r--r-- include/asm-mips/mipsregs.h | 2
-rw-r--r-- include/asm-mips/page.h | 2
-rw-r--r-- include/asm-mips/pgtable-32.h | 61
-rw-r--r-- include/asm-mips/pgtable-64.h | 13
-rw-r--r-- include/asm-mips/pgtable.h | 103
-rw-r--r-- include/asm-mips/sigcontext.h | 10
-rw-r--r-- include/asm-mips/smp.h | 5
-rw-r--r-- include/asm-mips/sparsemem.h | 14
-rw-r--r-- include/asm-powerpc/cputable.h | 2
-rw-r--r-- include/asm-s390/futex.h | 15
-rw-r--r-- include/asm-s390/lowcore.h | 4
-rw-r--r-- include/asm-sparc64/pgtable.h | 17
-rw-r--r-- include/asm-um/irqflags.h | 6
-rw-r--r-- include/asm-um/uaccess.h | 6
-rw-r--r-- include/asm-x86_64/elf.h | 2
-rw-r--r-- include/linux/console.h | 4
-rw-r--r-- include/linux/elevator.h | 2
-rw-r--r-- include/linux/i2o.h | 5
-rw-r--r-- include/linux/m48t86.h | 4
-rw-r--r-- include/linux/mempolicy.h | 1
-rw-r--r-- include/linux/mmzone.h | 1
-rw-r--r-- include/linux/pci-acpi.h | 2
-rw-r--r-- include/linux/pci.h | 2
-rw-r--r-- include/linux/pci_ids.h | 10
-rw-r--r-- include/linux/vt_kern.h | 5
-rw-r--r-- include/net/compat.h | 3
-rw-r--r-- include/net/ieee80211.h | 9
-rw-r--r-- include/net/ieee80211softmac.h | 40
-rw-r--r-- include/net/ieee80211softmac_wx.h | 5
-rw-r--r-- kernel/exit.c | 8
-rw-r--r-- kernel/hrtimer.c | 6
-rw-r--r-- kernel/posix-cpu-timers.c | 48
-rw-r--r-- kernel/power/main.c | 2
-rw-r--r-- kernel/printk.c | 28
-rw-r--r-- mm/memory_hotplug.c | 8
-rw-r--r-- mm/shmem.c | 2
-rw-r--r-- mm/slab.c | 27
-rw-r--r-- mm/vmscan.c | 2
-rw-r--r-- net/bridge/br_if.c | 19
-rw-r--r-- net/dccp/ackvec.c | 1
-rw-r--r-- net/ethernet/Makefile | 1
-rw-r--r-- net/ethernet/sysctl_net_ether.c | 14
-rw-r--r-- net/ieee80211/ieee80211_crypt_tkip.c | 11
-rw-r--r-- net/ieee80211/ieee80211_rx.c | 18
-rw-r--r-- net/ieee80211/ieee80211_tx.c | 88
-rw-r--r-- net/ieee80211/ieee80211_wx.c | 44
-rw-r--r-- net/ieee80211/softmac/Kconfig | 1
-rw-r--r-- net/ieee80211/softmac/ieee80211softmac_assoc.c | 96
-rw-r--r-- net/ieee80211/softmac/ieee80211softmac_auth.c | 15
-rw-r--r-- net/ieee80211/softmac/ieee80211softmac_event.c | 30
-rw-r--r-- net/ieee80211/softmac/ieee80211softmac_io.c | 169
-rw-r--r-- net/ieee80211/softmac/ieee80211softmac_module.c | 117
-rw-r--r-- net/ieee80211/softmac/ieee80211softmac_priv.h | 5
-rw-r--r-- net/ieee80211/softmac/ieee80211softmac_wx.c | 36
-rw-r--r-- net/ipv4/ip_forward.c | 1
-rw-r--r-- net/ipv4/tcp_highspeed.c | 3
-rw-r--r-- net/ipv4/tcp_input.c | 4
-rw-r--r-- net/ipv4/tcp_output.c | 12
-rw-r--r-- net/irda/irlap.c | 3
-rw-r--r-- net/sysctl_net.c | 8
-rw-r--r-- security/selinux/hooks.c | 6
339 files changed, 15585 insertions, 7454 deletions
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index c61d8b876fdb..4710845dbac4 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -19,6 +19,7 @@ Contents:
- Control dependencies.
- SMP barrier pairing.
- Examples of memory barrier sequences.
+ - Read memory barriers vs load speculation.
(*) Explicit kernel barriers.
@@ -248,7 +249,7 @@ And there are a number of things that _must_ or _must_not_ be assumed:
we may get either of:
STORE *A = X; Y = LOAD *A;
- STORE *A = Y;
+ STORE *A = Y = X;
=========================
@@ -344,9 +345,12 @@ Memory barriers come in four basic varieties:
(4) General memory barriers.
- A general memory barrier is a combination of both a read memory barrier
- and a write memory barrier. It is a partial ordering over both loads and
- stores.
+ A general memory barrier gives a guarantee that all the LOAD and STORE
+ operations specified before the barrier will appear to happen before all
+ the LOAD and STORE operations specified after the barrier with respect to
+ the other components of the system.
+
+ A general memory barrier is a partial ordering over both loads and stores.
General memory barriers imply both read and write memory barriers, and so
can substitute for either.
@@ -546,9 +550,9 @@ write barrier, though, again, a general barrier is viable:
=============== ===============
a = 1;
<write barrier>
- b = 2; x = a;
+ b = 2; x = b;
<read barrier>
- y = b;
+ y = a;
Or:
@@ -563,6 +567,18 @@ Or:
Basically, the read barrier always has to be there, even though it can be of
the "weaker" type.
+[!] Note that the stores before the write barrier would normally be expected to
+match the loads after the read barrier or data dependency barrier, and vice
+versa:
+
+ CPU 1 CPU 2
+ =============== ===============
+ a = 1; }---- --->{ v = c
+ b = 2; } \ / { w = d
+ <write barrier> \ <read barrier>
+ c = 3; } / \ { x = a;
+ d = 4; }---- --->{ y = b;
+
EXAMPLES OF MEMORY BARRIER SEQUENCES
------------------------------------
@@ -600,8 +616,8 @@ STORE B, STORE C } all occuring before the unordered set of { STORE D, STORE E
| | +------+
+-------+ : :
|
- | Sequence in which stores committed to memory system
- | by CPU 1
+ | Sequence in which stores are committed to the
+ | memory system by CPU 1
V
@@ -683,14 +699,12 @@ then the following will occur:
| : : | |
| : : | CPU 2 |
| +-------+ | |
- \ | X->9 |------>| |
- \ +-------+ | |
- ----->| B->2 | | |
- +-------+ | |
- Makes sure all effects ---> ddddddddddddddddd | |
- prior to the store of C +-------+ | |
- are perceptible to | B->2 |------>| |
- successive loads +-------+ | |
+ | | X->9 |------>| |
+ | +-------+ | |
+ Makes sure all effects ---> \ ddddddddddddddddd | |
+ prior to the store of C \ +-------+ | |
+ are perceptible to ----->| B->2 |------>| |
+ subsequent loads +-------+ | |
: : +-------+
@@ -699,73 +713,239 @@ following sequence of events:
CPU 1 CPU 2
======================= =======================
+ { A = 0, B = 9 }
STORE A=1
- STORE B=2
- STORE C=3
<write barrier>
- STORE D=4
- STORE E=5
- LOAD A
+ STORE B=2
LOAD B
- LOAD C
- LOAD D
- LOAD E
+ LOAD A
Without intervention, CPU 2 may then choose to perceive the events on CPU 1 in
some effectively random order, despite the write barrier issued by CPU 1:
- +-------+ : :
- | | +------+
- | |------>| C=3 | }
- | | : +------+ }
- | | : | A=1 | }
- | | : +------+ }
- | CPU 1 | : | B=2 | }---
- | | +------+ } \
- | | wwwwwwwwwwwww} \
- | | +------+ } \ : : +-------+
- | | : | E=5 | } \ +-------+ | |
- | | : +------+ } \ { | C->3 |------>| |
- | |------>| D=4 | } \ { +-------+ : | |
- | | +------+ \ { | E->5 | : | |
- +-------+ : : \ { +-------+ : | |
- Transfer -->{ | A->1 | : | CPU 2 |
- from CPU 1 { +-------+ : | |
- to CPU 2 { | D->4 | : | |
- { +-------+ : | |
- { | B->2 |------>| |
- +-------+ | |
- : : +-------+
-
-
-If, however, a read barrier were to be placed between the load of C and the
-load of D on CPU 2, then the partial ordering imposed by CPU 1 will be
-perceived correctly by CPU 2.
+ +-------+ : : : :
+ | | +------+ +-------+
+ | |------>| A=1 |------ --->| A->0 |
+ | | +------+ \ +-------+
+ | CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
+ | | +------+ | +-------+
+ | |------>| B=2 |--- | : :
+ | | +------+ \ | : : +-------+
+ +-------+ : : \ | +-------+ | |
+ ---------->| B->2 |------>| |
+ | +-------+ | CPU 2 |
+ | | A->0 |------>| |
+ | +-------+ | |
+ | : : +-------+
+ \ : :
+ \ +-------+
+ ---->| A->1 |
+ +-------+
+ : :
- +-------+ : :
- | | +------+
- | |------>| C=3 | }
- | | : +------+ }
- | | : | A=1 | }---
- | | : +------+ } \
- | CPU 1 | : | B=2 | } \
- | | +------+ \
- | | wwwwwwwwwwwwwwww \
- | | +------+ \ : : +-------+
- | | : | E=5 | } \ +-------+ | |
- | | : +------+ }--- \ { | C->3 |------>| |
- | |------>| D=4 | } \ \ { +-------+ : | |
- | | +------+ \ -->{ | B->2 | : | |
- +-------+ : : \ { +-------+ : | |
- \ { | A->1 | : | CPU 2 |
- \ +-------+ | |
- At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
- barrier causes all effects \ +-------+ | |
- prior to the storage of C \ { | E->5 | : | |
- to be perceptible to CPU 2 -->{ +-------+ : | |
- { | D->4 |------>| |
- +-------+ | |
- : : +-------+
+
+If, however, a read barrier were to be placed between the load of E and the
+load of A on CPU 2:
+
+ CPU 1 CPU 2
+ ======================= =======================
+ { A = 0, B = 9 }
+ STORE A=1
+ <write barrier>
+ STORE B=2
+ LOAD B
+ <read barrier>
+ LOAD A
+
+then the partial ordering imposed by CPU 1 will be perceived correctly by CPU
+2:
+
+ +-------+ : : : :
+ | | +------+ +-------+
+ | |------>| A=1 |------ --->| A->0 |
+ | | +------+ \ +-------+
+ | CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
+ | | +------+ | +-------+
+ | |------>| B=2 |--- | : :
+ | | +------+ \ | : : +-------+
+ +-------+ : : \ | +-------+ | |
+ ---------->| B->2 |------>| |
+ | +-------+ | CPU 2 |
+ | : : | |
+ | : : | |
+ At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
+ barrier causes all effects \ +-------+ | |
+ prior to the storage of B ---->| A->1 |------>| |
+ to be perceptible to CPU 2 +-------+ | |
+ : : +-------+
+
+
+To illustrate this more completely, consider what could happen if the code
+contained a load of A either side of the read barrier:
+
+ CPU 1 CPU 2
+ ======================= =======================
+ { A = 0, B = 9 }
+ STORE A=1
+ <write barrier>
+ STORE B=2
+ LOAD B
+ LOAD A [first load of A]
+ <read barrier>
+ LOAD A [second load of A]
+
+Even though the two loads of A both occur after the load of B, they may both
+come up with different values:
+
+ +-------+ : : : :
+ | | +------+ +-------+
+ | |------>| A=1 |------ --->| A->0 |
+ | | +------+ \ +-------+
+ | CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
+ | | +------+ | +-------+
+ | |------>| B=2 |--- | : :
+ | | +------+ \ | : : +-------+
+ +-------+ : : \ | +-------+ | |
+ ---------->| B->2 |------>| |
+ | +-------+ | CPU 2 |
+ | : : | |
+ | : : | |
+ | +-------+ | |
+ | | A->0 |------>| 1st |
+ | +-------+ | |
+ At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
+ barrier causes all effects \ +-------+ | |
+ prior to the storage of B ---->| A->1 |------>| 2nd |
+ to be perceptible to CPU 2 +-------+ | |
+ : : +-------+
+
+
+But it may be that the update to A from CPU 1 becomes perceptible to CPU 2
+before the read barrier completes anyway:
+
+ +-------+ : : : :
+ | | +------+ +-------+
+ | |------>| A=1 |------ --->| A->0 |
+ | | +------+ \ +-------+
+ | CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
+ | | +------+ | +-------+
+ | |------>| B=2 |--- | : :
+ | | +------+ \ | : : +-------+
+ +-------+ : : \ | +-------+ | |
+ ---------->| B->2 |------>| |
+ | +-------+ | CPU 2 |
+ | : : | |
+ \ : : | |
+ \ +-------+ | |
+ ---->| A->1 |------>| 1st |
+ +-------+ | |
+ rrrrrrrrrrrrrrrrr | |
+ +-------+ | |
+ | A->1 |------>| 2nd |
+ +-------+ | |
+ : : +-------+
+
+
+The guarantee is that the second load will always come up with A == 1 if the
+load of B came up with B == 2. No such guarantee exists for the first load of
+A; that may come up with either A == 0 or A == 1.
+
+
+READ MEMORY BARRIERS VS LOAD SPECULATION
+----------------------------------------
+
+Many CPUs speculate with loads: that is they see that they will need to load an
+item from memory, and they find a time where they're not using the bus for any
+other loads, and so do the load in advance - even though they haven't actually
+got to that point in the instruction execution flow yet. This permits the
+actual load instruction to potentially complete immediately because the CPU
+already has the value to hand.
+
+It may turn out that the CPU didn't actually need the value - perhaps because a
+branch circumvented the load - in which case it can discard the value or just
+cache it for later use.
+
+Consider:
+
+ CPU 1 CPU 2
+ ======================= =======================
+ LOAD B
+ DIVIDE } Divide instructions generally
+ DIVIDE } take a long time to perform
+ LOAD A
+
+Which might appear as this:
+
+ : : +-------+
+ +-------+ | |
+ --->| B->2 |------>| |
+ +-------+ | CPU 2 |
+ : :DIVIDE | |
+ +-------+ | |
+ The CPU being busy doing a ---> --->| A->0 |~~~~ | |
+ division speculates on the +-------+ ~ | |
+ LOAD of A : : ~ | |
+ : :DIVIDE | |
+ : : ~ | |
+ Once the divisions are complete --> : : ~-->| |
+ the CPU can then perform the : : | |
+ LOAD with immediate effect : : +-------+
+
+
+Placing a read barrier or a data dependency barrier just before the second
+load:
+
+ CPU 1 CPU 2
+ ======================= =======================
+ LOAD B
+ DIVIDE
+ DIVIDE
+ <read barrier>
+ LOAD A
+
+will force any value speculatively obtained to be reconsidered to an extent
+dependent on the type of barrier used. If there was no change made to the
+speculated memory location, then the speculated value will just be used:
+
+ : : +-------+
+ +-------+ | |
+ --->| B->2 |------>| |
+ +-------+ | CPU 2 |
+ : :DIVIDE | |
+ +-------+ | |
+ The CPU being busy doing a ---> --->| A->0 |~~~~ | |
+ division speculates on the +-------+ ~ | |
+ LOAD of A : : ~ | |
+ : :DIVIDE | |
+ : : ~ | |
+ : : ~ | |
+ rrrrrrrrrrrrrrrr~ | |
+ : : ~ | |
+ : : ~-->| |
+ : : | |
+ : : +-------+
+
+
+but if there was an update or an invalidation from another CPU pending, then
+the speculation will be cancelled and the value reloaded:
+
+ : : +-------+
+ +-------+ | |
+ --->| B->2 |------>| |
+ +-------+ | CPU 2 |
+ : :DIVIDE | |
+ +-------+ | |
+ The CPU being busy doing a ---> --->| A->0 |~~~~ | |
+ division speculates on the +-------+ ~ | |
+ LOAD of A : : ~ | |
+ : :DIVIDE | |
+ : : ~ | |
+ : : ~ | |
+ rrrrrrrrrrrrrrrrr | |
+ +-------+ | |
+ The speculation is discarded ---> --->| A->1 |------>| |
+ and an updated value is +-------+ | |
+ retrieved : : +-------+
========================
@@ -901,7 +1081,7 @@ IMPLICIT KERNEL MEMORY BARRIERS
===============================
Some of the other functions in the linux kernel imply memory barriers, amongst
-which are locking, scheduling and memory allocation functions.
+which are locking and scheduling functions.
This specification is a _minimum_ guarantee; any particular architecture may
provide more substantial guarantees, but these may not be relied upon outside
@@ -966,6 +1146,20 @@ equivalent to a full barrier, but a LOCK followed by an UNLOCK is not.
barriers is that the effects instructions outside of a critical section may
seep into the inside of the critical section.
+A LOCK followed by an UNLOCK may not be assumed to be a full memory barrier
+because it is possible for an access preceding the LOCK to happen after the
+LOCK, and an access following the UNLOCK to happen before the UNLOCK, and the
+two accesses can themselves then cross:
+
+ *A = a;
+ LOCK
+ UNLOCK
+ *B = b;
+
+may occur as:
+
+ LOCK, STORE *B, STORE *A, UNLOCK
+
Locks and semaphores may not provide any guarantee of ordering on UP compiled
systems, and so cannot be counted on in such a situation to actually achieve
anything at all - especially with respect to I/O accesses - unless combined
@@ -1016,8 +1210,6 @@ Other functions that imply barriers:
(*) schedule() and similar imply full memory barriers.
- (*) Memory allocation and release functions imply full memory barriers.
-
=================================
INTER-CPU LOCKING BARRIER EFFECTS
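
The pairing rules documented above map directly onto the kernel's barrier
primitives. Below is a minimal C sketch (not part of this patch, and assuming
the usual kernel headers for smp_wmb(), smp_rmb() and BUG_ON()); the variables
and the two functions are hypothetical and only mirror the A/B example:

/* CPU 1: publish the data, then the flag, with a write barrier between. */
static int data;
static int flag;

static void producer(void)
{
	data = 42;		/* STORE A=1 in the example above */
	smp_wmb();		/* <write barrier> */
	flag = 1;		/* STORE B=2 */
}

/* CPU 2: check the flag, then read the data, with the paired read barrier. */
static void consumer(void)
{
	if (flag) {		/* LOAD B */
		smp_rmb();	/* <read barrier>, pairs with the smp_wmb() */
		BUG_ON(data != 42);	/* LOAD A now sees the store */
	}
}

Without the smp_rmb(), the load of "data" may be satisfied by a stale,
speculated value, exactly as described in the load speculation section above.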
diff --git a/Documentation/networking/README.ipw2200 b/Documentation/networking/README.ipw2200
index acb30c5dcff3..4f2a40f1dbc6 100644
--- a/Documentation/networking/README.ipw2200
+++ b/Documentation/networking/README.ipw2200
@@ -14,8 +14,8 @@ Copyright (C) 2004-2006, Intel Corporation
README.ipw2200
-Version: 1.0.8
-Date : October 20, 2005
+Version: 1.1.2
+Date : March 30, 2006
Index
@@ -103,7 +103,7 @@ file.
1.1. Overview of Features
-----------------------------------------------
-The current release (1.0.8) supports the following features:
+The current release (1.1.2) supports the following features:
+ BSS mode (Infrastructure, Managed)
+ IBSS mode (Ad-Hoc)
@@ -247,8 +247,8 @@ and can set the contents via echo. For example:
% cat /sys/bus/pci/drivers/ipw2200/debug_level
Will report the current debug level of the driver's logging subsystem
-(only available if CONFIG_IPW_DEBUG was configured when the driver was
-built).
+(only available if CONFIG_IPW2200_DEBUG was configured when the driver
+was built).
You can set the debug level via:
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 8d8b4e5ea184..afac780445cd 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -1,7 +1,7 @@
Linux Ethernet Bonding Driver HOWTO
- Latest update: 21 June 2005
+ Latest update: 24 April 2006
Initial release : Thomas Davis <tadavis at lbl.gov>
Corrections, HA extensions : 2000/10/03-15 :
@@ -12,6 +12,8 @@ Corrections, HA extensions : 2000/10/03-15 :
- Jay Vosburgh <fubar at us dot ibm dot com>
Reorganized and updated Feb 2005 by Jay Vosburgh
+Added Sysfs information: 2006/04/24
+ - Mitch Williams <mitch.a.williams at intel.com>
Introduction
============
@@ -38,61 +40,62 @@ Table of Contents
2. Bonding Driver Options
3. Configuring Bonding Devices
-3.1 Configuration with sysconfig support
-3.1.1 Using DHCP with sysconfig
-3.1.2 Configuring Multiple Bonds with sysconfig
-3.2 Configuration with initscripts support
-3.2.1 Using DHCP with initscripts
-3.2.2 Configuring Multiple Bonds with initscripts
-3.3 Configuring Bonding Manually
+3.1 Configuration with Sysconfig Support
+3.1.1 Using DHCP with Sysconfig
+3.1.2 Configuring Multiple Bonds with Sysconfig
+3.2 Configuration with Initscripts Support
+3.2.1 Using DHCP with Initscripts
+3.2.2 Configuring Multiple Bonds with Initscripts
+3.3 Configuring Bonding Manually with Ifenslave
3.3.1 Configuring Multiple Bonds Manually
+3.4 Configuring Bonding Manually via Sysfs
-5. Querying Bonding Configuration
-5.1 Bonding Configuration
-5.2 Network Configuration
+4. Querying Bonding Configuration
+4.1 Bonding Configuration
+4.2 Network Configuration
-6. Switch Configuration
+5. Switch Configuration
-7. 802.1q VLAN Support
+6. 802.1q VLAN Support
-8. Link Monitoring
-8.1 ARP Monitor Operation
-8.2 Configuring Multiple ARP Targets
-8.3 MII Monitor Operation
+7. Link Monitoring
+7.1 ARP Monitor Operation
+7.2 Configuring Multiple ARP Targets
+7.3 MII Monitor Operation
-9. Potential Trouble Sources
-9.1 Adventures in Routing
-9.2 Ethernet Device Renaming
-9.3 Painfully Slow Or No Failed Link Detection By Miimon
+8. Potential Trouble Sources
+8.1 Adventures in Routing
+8.2 Ethernet Device Renaming
+8.3 Painfully Slow Or No Failed Link Detection By Miimon
-10. SNMP agents
+9. SNMP agents
-11. Promiscuous mode
+10. Promiscuous mode
-12. Configuring Bonding for High Availability
-12.1 High Availability in a Single Switch Topology
-12.2 High Availability in a Multiple Switch Topology
-12.2.1 HA Bonding Mode Selection for Multiple Switch Topology
-12.2.2 HA Link Monitoring for Multiple Switch Topology
+11. Configuring Bonding for High Availability
+11.1 High Availability in a Single Switch Topology
+11.2 High Availability in a Multiple Switch Topology
+11.2.1 HA Bonding Mode Selection for Multiple Switch Topology
+11.2.2 HA Link Monitoring for Multiple Switch Topology
-13. Configuring Bonding for Maximum Throughput
-13.1 Maximum Throughput in a Single Switch Topology
-13.1.1 MT Bonding Mode Selection for Single Switch Topology
-13.1.2 MT Link Monitoring for Single Switch Topology
-13.2 Maximum Throughput in a Multiple Switch Topology
-13.2.1 MT Bonding Mode Selection for Multiple Switch Topology
-13.2.2 MT Link Monitoring for Multiple Switch Topology
+12. Configuring Bonding for Maximum Throughput
+12.1 Maximum Throughput in a Single Switch Topology
+12.1.1 MT Bonding Mode Selection for Single Switch Topology
+12.1.2 MT Link Monitoring for Single Switch Topology
+12.2 Maximum Throughput in a Multiple Switch Topology
+12.2.1 MT Bonding Mode Selection for Multiple Switch Topology
+12.2.2 MT Link Monitoring for Multiple Switch Topology
-14. Switch Behavior Issues
-14.1 Link Establishment and Failover Delays
-14.2 Duplicated Incoming Packets
+13. Switch Behavior Issues
+13.1 Link Establishment and Failover Delays
+13.2 Duplicated Incoming Packets
-15. Hardware Specific Considerations
-15.1 IBM BladeCenter
+14. Hardware Specific Considerations
+14.1 IBM BladeCenter
-16. Frequently Asked Questions
+15. Frequently Asked Questions
-17. Resources and Links
+16. Resources and Links
1. Bonding Driver Installation
@@ -156,6 +159,9 @@ you're trying to build it for. Some distros (e.g., Red Hat from 7.1
onwards) do not have /usr/include/linux symbolically linked to the
default kernel source include directory.
+SECOND IMPORTANT NOTE:
+ If you plan to configure bonding using sysfs, you do not need
+to use ifenslave.
2. Bonding Driver Options
=========================
@@ -270,7 +276,7 @@ mode
In bonding version 2.6.2 or later, when a failover
occurs in active-backup mode, bonding will issue one
or more gratuitous ARPs on the newly active slave.
- One gratutious ARP is issued for the bonding master
+ One gratuitous ARP is issued for the bonding master
interface and each VLAN interfaces configured above
it, provided that the interface has at least one IP
address configured. Gratuitous ARPs issued for VLAN
@@ -377,7 +383,7 @@ mode
When a link is reconnected or a new slave joins the
bond the receive traffic is redistributed among all
active slaves in the bond by initiating ARP Replies
- with the selected mac address to each of the
+ with the selected MAC address to each of the
clients. The updelay parameter (detailed below) must
be set to a value equal or greater than the switch's
forwarding delay so that the ARP Replies sent to the
@@ -498,11 +504,12 @@ not exist, and the layer2 policy is the only policy.
3. Configuring Bonding Devices
==============================
- There are, essentially, two methods for configuring bonding:
-with support from the distro's network initialization scripts, and
-without. Distros generally use one of two packages for the network
-initialization scripts: initscripts or sysconfig. Recent versions of
-these packages have support for bonding, while older versions do not.
+ You can configure bonding using either your distro's network
+initialization scripts, or manually using either ifenslave or the
+sysfs interface. Distros generally use one of two packages for the
+network initialization scripts: initscripts or sysconfig. Recent
+versions of these packages have support for bonding, while older
+versions do not.
We will first describe the options for configuring bonding for
distros using versions of initscripts and sysconfig with full or
@@ -530,7 +537,7 @@ $ grep ifenslave /sbin/ifup
If this returns any matches, then your initscripts or
sysconfig has support for bonding.
-3.1 Configuration with sysconfig support
+3.1 Configuration with Sysconfig Support
----------------------------------------
This section applies to distros using a version of sysconfig
@@ -538,7 +545,7 @@ with bonding support, for example, SuSE Linux Enterprise Server 9.
SuSE SLES 9's networking configuration system does support
bonding, however, at this writing, the YaST system configuration
-frontend does not provide any means to work with bonding devices.
+front end does not provide any means to work with bonding devices.
Bonding devices can be managed by hand, however, as follows.
First, if they have not already been configured, configure the
@@ -660,7 +667,7 @@ format can be found in an example ifcfg template file:
Note that the template does not document the various BONDING_
settings described above, but does describe many of the other options.
-3.1.1 Using DHCP with sysconfig
+3.1.1 Using DHCP with Sysconfig
-------------------------------
Under sysconfig, configuring a device with BOOTPROTO='dhcp'
@@ -670,7 +677,7 @@ attempt to obtain the device address from DHCP prior to adding any of
the slave devices. Without active slaves, the DHCP requests are not
sent to the network.
-3.1.2 Configuring Multiple Bonds with sysconfig
+3.1.2 Configuring Multiple Bonds with Sysconfig
-----------------------------------------------
The sysconfig network initialization system is capable of
@@ -685,7 +692,7 @@ ifcfg-bondX files.
options in the ifcfg-bondX file, it is not necessary to add them to
the system /etc/modules.conf or /etc/modprobe.conf configuration file.
-3.2 Configuration with initscripts support
+3.2 Configuration with Initscripts Support
------------------------------------------
This section applies to distros using a version of initscripts
@@ -756,7 +763,7 @@ options for your configuration.
will restart the networking subsystem and your bond link should be now
up and running.
-3.2.1 Using DHCP with initscripts
+3.2.1 Using DHCP with Initscripts
---------------------------------
Recent versions of initscripts (the version supplied with
@@ -768,7 +775,7 @@ above, except replace the line "BOOTPROTO=none" with "BOOTPROTO=dhcp"
and add a line consisting of "TYPE=Bonding". Note that the TYPE value
is case sensitive.
-3.2.2 Configuring Multiple Bonds with initscripts
+3.2.2 Configuring Multiple Bonds with Initscripts
-------------------------------------------------
At this writing, the initscripts package does not directly
@@ -784,8 +791,8 @@ Fedora Core kernels, and has been seen on RHEL 4 as well. On kernels
exhibiting this problem, it will be impossible to configure multiple
bonds with differing parameters.
-3.3 Configuring Bonding Manually
---------------------------------
+3.3 Configuring Bonding Manually with Ifenslave
+-----------------------------------------------
This section applies to distros whose network initialization
scripts (the sysconfig or initscripts package) do not have specific
@@ -889,11 +896,139 @@ install bond1 /sbin/modprobe --ignore-install bonding -o bond1 \
This may be repeated any number of times, specifying a new and
unique name in place of bond1 for each subsequent instance.
+3.4 Configuring Bonding Manually via Sysfs
+------------------------------------------
+
+ Starting with version 3.0, Channel Bonding may be configured
+via the sysfs interface. This interface allows dynamic configuration
+of all bonds in the system without unloading the module. It also
+allows for adding and removing bonds at runtime. Ifenslave is no
+longer required, though it is still supported.
+
+ Use of the sysfs interface allows you to use multiple bonds
+with different configurations without having to reload the module.
+It also allows you to use multiple, differently configured bonds when
+bonding is compiled into the kernel.
+
+ You must have the sysfs filesystem mounted to configure
+bonding this way. The examples in this document assume that you
+are using the standard mount point for sysfs, e.g. /sys. If your
+sysfs filesystem is mounted elsewhere, you will need to adjust the
+example paths accordingly.
+
+Creating and Destroying Bonds
+-----------------------------
+To add a new bond foo:
+# echo +foo > /sys/class/net/bonding_masters
+
+To remove an existing bond bar:
+# echo -bar > /sys/class/net/bonding_masters
+
+To show all existing bonds:
+# cat /sys/class/net/bonding_masters
+
+NOTE: due to 4K size limitation of sysfs files, this list may be
+truncated if you have more than a few hundred bonds. This is unlikely
+to occur under normal operating conditions.
+
+Adding and Removing Slaves
+--------------------------
+ Interfaces may be enslaved to a bond using the file
+/sys/class/net/<bond>/bonding/slaves. The semantics for this file
+are the same as for the bonding_masters file.
+
+To enslave interface eth0 to bond bond0:
+# ifconfig bond0 up
+# echo +eth0 > /sys/class/net/bond0/bonding/slaves
+
+To free slave eth0 from bond bond0:
+# echo -eth0 > /sys/class/net/bond0/bonding/slaves
+
+ NOTE: The bond must be up before slaves can be added. All
+slaves are freed when the interface is brought down.
+
+ When an interface is enslaved to a bond, symlinks between the
+two are created in the sysfs filesystem. In this case, you would get
+/sys/class/net/bond0/slave_eth0 pointing to /sys/class/net/eth0, and
+/sys/class/net/eth0/master pointing to /sys/class/net/bond0.
+
+ This means that you can tell quickly whether or not an
+interface is enslaved by looking for the master symlink. Thus:
+# echo -eth0 > /sys/class/net/eth0/master/bonding/slaves
+will free eth0 from whatever bond it is enslaved to, regardless of
+the name of the bond interface.
+
+Changing a Bond's Configuration
+-------------------------------
+ Each bond may be configured individually by manipulating the
+files located in /sys/class/net/<bond name>/bonding
+
+ The names of these files correspond directly with the command-
+line parameters described elsewhere in this file, and, with the
+exception of arp_ip_target, they accept the same values. To see the
+current setting, simply cat the appropriate file.
+
+ A few examples will be given here; for specific usage
+guidelines for each parameter, see the appropriate section in this
+document.
+
+To configure bond0 for balance-alb mode:
+# ifconfig bond0 down
+# echo 6 > /sys/class/net/bond0/bonding/mode
+ - or -
+# echo balance-alb > /sys/class/net/bond0/bonding/mode
+ NOTE: The bond interface must be down before the mode can be
+changed.
+
+To enable MII monitoring on bond0 with a 1 second interval:
+# echo 1000 > /sys/class/net/bond0/bonding/miimon
+ NOTE: If ARP monitoring is enabled, it will be disabled when MII
+monitoring is enabled, and vice-versa.
+
+To add ARP targets:
+# echo +192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target
+# echo +192.168.0.101 > /sys/class/net/bond0/bonding/arp_ip_target
+ NOTE: up to 10 target addresses may be specified.
+
+To remove an ARP target:
+# echo -192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target
+
+Example Configuration
+---------------------
+ We begin with the same example that is shown in section 3.3,
+executed with sysfs, and without using ifenslave.
+
+ To make a simple bond of two e100 devices (presumed to be eth0
+and eth1), and have it persist across reboots, edit the appropriate
+file (/etc/init.d/boot.local or /etc/rc.d/rc.local), and add the
+following:
+
+modprobe bonding
+modprobe e100
+echo balance-alb > /sys/class/net/bond0/bonding/mode
+ifconfig bond0 192.168.1.1 netmask 255.255.255.0 up
+echo 100 > /sys/class/net/bond0/bonding/miimon
+echo +eth0 > /sys/class/net/bond0/bonding/slaves
+echo +eth1 > /sys/class/net/bond0/bonding/slaves
+
+ To add a second bond, with two e1000 interfaces in
+active-backup mode, using ARP monitoring, add the following lines to
+your init script:
+
+modprobe e1000
+echo +bond1 > /sys/class/net/bonding_masters
+echo active-backup > /sys/class/net/bond1/bonding/mode
+ifconfig bond1 192.168.2.1 netmask 255.255.255.0 up
+echo +192.168.2.100 > /sys/class/net/bond1/bonding/arp_ip_target
+echo 2000 > /sys/class/net/bond1/bonding/arp_interval
+echo +eth2 > /sys/class/net/bond1/bonding/slaves
+echo +eth3 > /sys/class/net/bond1/bonding/slaves
+
-5. Querying Bonding Configuration
+4. Querying Bonding Configuration
=================================
-5.1 Bonding Configuration
+4.1 Bonding Configuration
-------------------------
Each bonding device has a read-only file residing in the
@@ -923,7 +1058,7 @@ generally as follows:
The precise format and contents will change depending upon the
bonding configuration, state, and version of the bonding driver.
-5.2 Network configuration
+4.2 Network configuration
-------------------------
The network configuration can be inspected using the ifconfig
@@ -958,7 +1093,7 @@ eth1 Link encap:Ethernet HWaddr 00:C0:F0:1F:37:B4
collisions:0 txqueuelen:100
Interrupt:9 Base address:0x1400
-6. Switch Configuration
+5. Switch Configuration
=======================
For this section, "switch" refers to whatever system the
@@ -991,7 +1126,7 @@ transmit policy for an EtherChannel group; all three will interoperate
with another EtherChannel group.
-7. 802.1q VLAN Support
+6. 802.1q VLAN Support
======================
It is possible to configure VLAN devices over a bond interface
@@ -1042,7 +1177,7 @@ underlying device -- i.e. the bonding interface -- to promiscuous
mode, which might not be what you want.
-8. Link Monitoring
+7. Link Monitoring
==================
The bonding driver at present supports two schemes for
@@ -1053,7 +1188,7 @@ monitor.
bonding driver itself, it is not possible to enable both ARP and MII
monitoring simultaneously.
-8.1 ARP Monitor Operation
+7.1 ARP Monitor Operation
-------------------------
The ARP monitor operates as its name suggests: it sends ARP
@@ -1071,7 +1206,7 @@ those slaves will stay down. If networking monitoring (tcpdump, etc)
shows the ARP requests and replies on the network, then it may be that
your device driver is not updating last_rx and trans_start.
-8.2 Configuring Multiple ARP Targets
+7.2 Configuring Multiple ARP Targets
------------------------------------
While ARP monitoring can be done with just one target, it can
@@ -1094,7 +1229,7 @@ alias bond0 bonding
options bond0 arp_interval=60 arp_ip_target=192.168.0.100
-8.3 MII Monitor Operation
+7.3 MII Monitor Operation
-------------------------
The MII monitor monitors only the carrier state of the local
@@ -1120,14 +1255,14 @@ does not support or had some error in processing both the MII register
and ethtool requests), then the MII monitor will assume the link is
up.
-9. Potential Sources of Trouble
+8. Potential Sources of Trouble
===============================
-9.1 Adventures in Routing
+8.1 Adventures in Routing
-------------------------
When bonding is configured, it is important that the slave
-devices not have routes that supercede routes of the master (or,
+devices not have routes that supersede routes of the master (or,
generally, not have routes at all). For example, suppose the bonding
device bond0 has two slaves, eth0 and eth1, and the routing table is
as follows:
@@ -1154,11 +1289,11 @@ by the state of the routing table.
The solution here is simply to insure that slaves do not have
routes of their own, and if for some reason they must, those routes do
-not supercede routes of their master. This should generally be the
+not supersede routes of their master. This should generally be the
case, but unusual configurations or errant manual or automatic static
route additions may cause trouble.
-9.2 Ethernet Device Renaming
+8.2 Ethernet Device Renaming
----------------------------
On systems with network configuration scripts that do not
@@ -1207,7 +1342,7 @@ modprobe with --ignore-install to cause the normal action to then take
place. Full documentation on this can be found in the modprobe.conf
and modprobe manual pages.
-9.3. Painfully Slow Or No Failed Link Detection By Miimon
+8.3. Painfully Slow Or No Failed Link Detection By Miimon
---------------------------------------------------------
By default, bonding enables the use_carrier option, which
@@ -1235,7 +1370,7 @@ carrier state. It has no way to determine the state of devices on or
beyond other ports of a switch, or if a switch is refusing to pass
traffic while still maintaining carrier on.
-10. SNMP agents
+9. SNMP agents
===============
If running SNMP agents, the bonding driver should be loaded
@@ -1281,7 +1416,7 @@ ifDescr, the association between the IP address and IfIndex remains
and SNMP functions such as Interface_Scan_Next will report that
association.
-11. Promiscuous mode
+10. Promiscuous mode
====================
When running network monitoring tools, e.g., tcpdump, it is
@@ -1308,7 +1443,7 @@ sending to peers that are unassigned or if the load is unbalanced.
the active slave changes (e.g., due to a link failure), the
promiscuous setting will be propagated to the new active slave.
-12. Configuring Bonding for High Availability
+11. Configuring Bonding for High Availability
=============================================
High Availability refers to configurations that provide
@@ -1318,7 +1453,7 @@ goal is to provide the maximum availability of network connectivity
(i.e., the network always works), even though other configurations
could provide higher throughput.
-12.1 High Availability in a Single Switch Topology
+11.1 High Availability in a Single Switch Topology
--------------------------------------------------
If two hosts (or a host and a single switch) are directly
@@ -1332,7 +1467,7 @@ the load will be rebalanced across the remaining devices.
See Section 13, "Configuring Bonding for Maximum Throughput"
for information on configuring bonding with one peer device.
-12.2 High Availability in a Multiple Switch Topology
+11.2 High Availability in a Multiple Switch Topology
----------------------------------------------------
With multiple switches, the configuration of bonding and the
@@ -1359,7 +1494,7 @@ switches (ISL, or inter switch link), and multiple ports connecting to
the outside world ("port3" on each switch). There is no technical
reason that this could not be extended to a third switch.
-12.2.1 HA Bonding Mode Selection for Multiple Switch Topology
+11.2.1 HA Bonding Mode Selection for Multiple Switch Topology
-------------------------------------------------------------
In a topology such as the example above, the active-backup and
@@ -1381,7 +1516,7 @@ broadcast: This mode is really a special purpose mode, and is suitable
necessary for some specific one-way traffic to reach both
independent networks, then the broadcast mode may be suitable.
-12.2.2 HA Link Monitoring Selection for Multiple Switch Topology
+11.2.2 HA Link Monitoring Selection for Multiple Switch Topology
----------------------------------------------------------------
The choice of link monitoring ultimately depends upon your
@@ -1402,10 +1537,10 @@ regardless of which switch is active, the ARP monitor has a suitable
target to query.
-13. Configuring Bonding for Maximum Throughput
+12. Configuring Bonding for Maximum Throughput
==============================================
-13.1 Maximizing Throughput in a Single Switch Topology
+12.1 Maximizing Throughput in a Single Switch Topology
------------------------------------------------------
In a single switch configuration, the best method to maximize
@@ -1476,7 +1611,7 @@ destination to make load balancing decisions. The behavior of each
mode is described below.
-13.1.1 MT Bonding Mode Selection for Single Switch Topology
+12.1.1 MT Bonding Mode Selection for Single Switch Topology
-----------------------------------------------------------
This configuration is the easiest to set up and to understand,
@@ -1607,7 +1742,7 @@ balance-alb: This mode is everything that balance-tlb is, and more.
device driver must support changing the hardware address while
the device is open.
-13.1.2 MT Link Monitoring for Single Switch Topology
+12.1.2 MT Link Monitoring for Single Switch Topology
----------------------------------------------------
The choice of link monitoring may largely depend upon which
@@ -1616,7 +1751,7 @@ support the use of the ARP monitor, and are thus restricted to using
the MII monitor (which does not provide as high a level of end to end
assurance as the ARP monitor).
-13.2 Maximum Throughput in a Multiple Switch Topology
+12.2 Maximum Throughput in a Multiple Switch Topology
-----------------------------------------------------
Multiple switches may be utilized to optimize for throughput
@@ -1651,7 +1786,7 @@ a single 72 port switch.
can be equipped with an additional network device connected to an
external network; this host then additionally acts as a gateway.
-13.2.1 MT Bonding Mode Selection for Multiple Switch Topology
+12.2.1 MT Bonding Mode Selection for Multiple Switch Topology
-------------------------------------------------------------
In actual practice, the bonding mode typically employed in
@@ -1664,7 +1799,7 @@ packets has arrived). When employed in this fashion, the balance-rr
mode allows individual connections between two hosts to effectively
utilize greater than one interface's bandwidth.
-13.2.2 MT Link Monitoring for Multiple Switch Topology
+12.2.2 MT Link Monitoring for Multiple Switch Topology
------------------------------------------------------
Again, in actual practice, the MII monitor is most often used
@@ -1674,10 +1809,10 @@ advantages over the MII monitor are mitigated by the volume of probes
needed as the number of systems involved grows (remember that each
host in the network is configured with bonding).
-14. Switch Behavior Issues
+13. Switch Behavior Issues
==========================
-14.1 Link Establishment and Failover Delays
+13.1 Link Establishment and Failover Delays
-------------------------------------------
Some switches exhibit undesirable behavior with regard to the
@@ -1712,7 +1847,7 @@ switches take a long time to go into backup mode, it may be desirable
to not activate a backup interface immediately after a link goes down.
Failover may be delayed via the downdelay bonding module option.
-14.2 Duplicated Incoming Packets
+13.2 Duplicated Incoming Packets
--------------------------------
It is not uncommon to observe a short burst of duplicated
@@ -1751,14 +1886,14 @@ behavior, it can be induced by clearing the MAC forwarding table (on
most Cisco switches, the privileged command "clear mac address-table
dynamic" will accomplish this).
-15. Hardware Specific Considerations
+14. Hardware Specific Considerations
====================================
This section contains additional information for configuring
bonding on specific hardware platforms, or for interfacing bonding
with particular switches or other devices.
-15.1 IBM BladeCenter
+14.1 IBM BladeCenter
--------------------
This applies to the JS20 and similar systems.
@@ -1861,7 +1996,7 @@ bonding driver.
avoid fail-over delay issues when using bonding.
-16. Frequently Asked Questions
+15. Frequently Asked Questions
==============================
1. Is it SMP safe?
@@ -1925,7 +2060,7 @@ not have special switch requirements, but do need device drivers that
support specific features (described in the appropriate section under
module parameters, above).
- In 802.3ad mode, it works with with systems that support IEEE
+ In 802.3ad mode, it works with systems that support IEEE
802.3ad Dynamic Link Aggregation. Most managed and many unmanaged
switches currently available support 802.3ad.
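
The sysfs interface described in section 3.4 can also be driven from a program
rather than the shell. The following is a hypothetical userspace sketch (not
part of this patch) that performs the same writes as the example configuration
above; the paths follow the documentation and error handling is intentionally
minimal:

#include <stdio.h>
#include <stdlib.h>

/* write a single value to a bonding sysfs attribute */
static void sysfs_write(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f || fprintf(f, "%s\n", val) < 0 || fclose(f) != 0) {
		perror(path);
		exit(1);
	}
}

int main(void)
{
	/* load the bonding driver; bond0 then exists with default settings */
	system("modprobe bonding");

	/* the mode can only be changed while the bond is down */
	sysfs_write("/sys/class/net/bond0/bonding/mode", "balance-alb");

	/* the bond must be up before slaves can be added (see the NOTE in 3.4) */
	system("ifconfig bond0 192.168.1.1 netmask 255.255.255.0 up");

	sysfs_write("/sys/class/net/bond0/bonding/miimon", "100");
	sysfs_write("/sys/class/net/bond0/bonding/slaves", "+eth0");
	sysfs_write("/sys/class/net/bond0/bonding/slaves", "+eth1");
	return 0;
}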
diff --git a/Documentation/serial/driver b/Documentation/serial/driver
index df82116a9f26..88ad615dd338 100644
--- a/Documentation/serial/driver
+++ b/Documentation/serial/driver
@@ -214,12 +214,13 @@ hardware.
The interaction of the iflag bits is as follows (parity error
given as an example):
Parity error INPCK IGNPAR
- None n/a n/a character received
- Yes n/a 0 character discarded
- Yes 0 1 character received, marked as
+ n/a 0 n/a character received, marked as
TTY_NORMAL
- Yes 1 1 character received, marked as
+ None 1 n/a character received, marked as
+ TTY_NORMAL
+ Yes 1 0 character received, marked as
TTY_PARITY
+ Yes 1 1 character discarded
Other flags may be used (eg, xon/xoff characters) if your
hardware supports hardware "soft" flow control.
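
To make the corrected iflag table concrete, here is a hypothetical sketch (not
part of this patch) of the decision it describes, written against the existing
termios helpers from <linux/tty.h>; the function name and its caller are made
up:

#include <linux/tty.h>

/*
 * Decide what to do with a received character that may carry a parity
 * error.  Returns 1 if the character should be queued, with *flag set
 * to TTY_NORMAL or TTY_PARITY, and 0 if it should be discarded.
 */
static int classify_rx_char(struct tty_struct *tty, int parity_error,
			    char *flag)
{
	*flag = TTY_NORMAL;

	if (!I_INPCK(tty) || !parity_error)
		return 1;		/* received, marked TTY_NORMAL */

	if (I_IGNPAR(tty))
		return 0;		/* character discarded */

	*flag = TTY_PARITY;		/* received, marked TTY_PARITY */
	return 1;
}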
diff --git a/MAINTAINERS b/MAINTAINERS
index 74d71cafb17c..1421f74b6009 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -568,6 +568,18 @@ L: linuxppc-dev@ozlabs.org
W: http://www.penguinppc.org/ppc64/
S: Supported
+BROADCOM BNX2 GIGABIT ETHERNET DRIVER
+P: Michael Chan
+M: mchan@broadcom.com
+L: netdev@vger.kernel.org
+S: Supported
+
+BROADCOM TG3 GIGABIT ETHERNET DRIVER
+P: Michael Chan
+M: mchan@broadcom.com
+L: netdev@vger.kernel.org
+S: Supported
+
BTTV VIDEO4LINUX DRIVER
P: Mauro Carvalho Chehab
M: mchehab@infradead.org
@@ -1413,6 +1425,8 @@ P: Jesse Brandeburg
M: jesse.brandeburg@intel.com
P: Jeff Kirsher
M: jeffrey.t.kirsher@intel.com
+P: Auke Kok
+M: auke-jan.h.kok@intel.com
W: http://sourceforge.net/projects/e1000/
S: Supported
@@ -1425,6 +1439,8 @@ P: Jesse Brandeburg
M: jesse.brandeburg@intel.com
P: Jeff Kirsher
M: jeffrey.t.kirsher@intel.com
+P: Auke Kok
+M: auke-jan.h.kok@intel.com
W: http://sourceforge.net/projects/e1000/
S: Supported
@@ -1437,6 +1453,8 @@ P: John Ronciak
M: john.ronciak@intel.com
P: Jesse Brandeburg
M: jesse.brandeburg@intel.com
+P: Auke Kok
+M: auke-jan.h.kok@intel.com
W: http://sourceforge.net/projects/e1000/
S: Supported
@@ -1877,6 +1895,11 @@ L: linux-kernel@vger.kernel.org
W: http://www.atnf.csiro.au/~rgooch/linux/kernel-patches.html
S: Maintained
+MULTIMEDIA CARD SUBSYSTEM
+P: Russell King
+M: rmk+mmc@arm.linux.org.uk
+S: Maintained
+
MULTISOUND SOUND DRIVER
P: Andrew Veliath
M: andrewtv@usa.net
diff --git a/Makefile b/Makefile
index 435d209f42d8..1700d3f6ea22 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 17
-EXTRAVERSION =-rc5
-NAME=Lordi Rules
+EXTRAVERSION =
+NAME=Crazed Snow-Weasel
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 8290b69da202..213c7850d5fb 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -453,7 +453,7 @@ config ALPHA_IRONGATE
config GENERIC_HWEIGHT
bool
- default y if !ALPHA_EV6 && !ALPHA_EV67
+ default y if !ALPHA_EV67
config ALPHA_AVANTI
bool
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index c645c5e14786..2b245ad731ee 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -182,7 +182,6 @@ EXPORT_SYMBOL(smp_num_cpus);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_call_function_on_cpu);
EXPORT_SYMBOL(_atomic_dec_and_lock);
-EXPORT_SYMBOL(cpu_present_mask);
#endif /* CONFIG_SMP */
/*
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 9924fd07743a..c760a831fd1a 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -94,7 +94,7 @@ common_shutdown_1(void *generic_ptr)
if (cpuid != boot_cpuid) {
flags |= 0x00040000UL; /* "remain halted" */
*pflags = flags;
- clear_bit(cpuid, &cpu_present_mask);
+ cpu_clear(cpuid, cpu_present_map);
halt();
}
#endif
@@ -120,8 +120,8 @@ common_shutdown_1(void *generic_ptr)
#ifdef CONFIG_SMP
/* Wait for the secondaries to halt. */
- cpu_clear(boot_cpuid, cpu_possible_map);
- while (cpus_weight(cpu_possible_map))
+ cpu_clear(boot_cpuid, cpu_present_map);
+ while (cpus_weight(cpu_present_map))
barrier();
#endif
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 185255416e85..4dc273e537fd 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -68,7 +68,6 @@ enum ipi_message_type {
static int smp_secondary_alive __initdata = 0;
/* Which cpus ids came online. */
-cpumask_t cpu_present_mask;
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
@@ -439,7 +438,7 @@ setup_smp(void)
if ((cpu->flags & 0x1cc) == 0x1cc) {
smp_num_probed++;
/* Assume here that "whami" == index */
- cpu_set(i, cpu_present_mask);
+ cpu_set(i, cpu_present_map);
cpu->pal_revision = boot_cpu_palrev;
}
@@ -450,11 +449,10 @@ setup_smp(void)
}
} else {
smp_num_probed = 1;
- cpu_set(boot_cpuid, cpu_present_mask);
}
- printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
- smp_num_probed, cpu_possible_map.bits[0]);
+ printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
+ smp_num_probed, cpu_present_map.bits[0]);
}
/*
@@ -473,7 +471,7 @@ smp_prepare_cpus(unsigned int max_cpus)
/* Nothing to do on a UP box, or when told not to. */
if (smp_num_probed == 1 || max_cpus == 0) {
- cpu_present_mask = cpumask_of_cpu(boot_cpuid);
+ cpu_present_map = cpumask_of_cpu(boot_cpuid);
printk(KERN_INFO "SMP mode deactivated.\n");
return;
}
@@ -486,10 +484,6 @@ smp_prepare_cpus(unsigned int max_cpus)
void __devinit
smp_prepare_boot_cpu(void)
{
- /*
- * Mark the boot cpu (current cpu) as online
- */
- cpu_set(smp_processor_id(), cpu_online_map);
}
int __devinit
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 5f84417eeb7b..2551fb49ae09 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -66,7 +66,7 @@ titan_update_irq_hw(unsigned long mask)
register int bcpu = boot_cpuid;
#ifdef CONFIG_SMP
- cpumask_t cpm = cpu_present_mask;
+ cpumask_t cpm = cpu_present_map;
volatile unsigned long *dim0, *dim1, *dim2, *dim3;
unsigned long mask0, mask1, mask2, mask3, dummy;
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 5d3acff8c596..d22f38b957db 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -101,7 +101,7 @@ config DEBUG_S3C2410_UART
help
Choice for UART for kernel low-level using S3C2410 UARTS,
should be between zero and two. The port must have been
- initalised by the boot-loader before use.
+ initialised by the boot-loader before use.
The uncompressor code port configuration is now handled
by CONFIG_S3C2410_LOWLEVEL_UART_PORT.
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 9be01b0c3f48..e24566b88a78 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -111,21 +111,21 @@ static void __init ts72xx_map_io(void)
}
}
-static unsigned char ts72xx_rtc_readb(unsigned long addr)
+static unsigned char ts72xx_rtc_readbyte(unsigned long addr)
{
__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
return __raw_readb(TS72XX_RTC_DATA_VIRT_BASE);
}
-static void ts72xx_rtc_writeb(unsigned char value, unsigned long addr)
+static void ts72xx_rtc_writebyte(unsigned char value, unsigned long addr)
{
__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
__raw_writeb(value, TS72XX_RTC_DATA_VIRT_BASE);
}
static struct m48t86_ops ts72xx_rtc_ops = {
- .readb = ts72xx_rtc_readb,
- .writeb = ts72xx_rtc_writeb,
+ .readbyte = ts72xx_rtc_readbyte,
+ .writebyte = ts72xx_rtc_writebyte,
};
static struct platform_device ts72xx_rtc_device = {
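The renamed helpers keep the chip's usual index/data access scheme: the register number is written to the index latch, then the value moves through the data window. A small usage sketch follows; treating register 0x00 as the seconds register is an assumption about the M48T86 map, not something the hunk states.

	/* Illustrative only; register 0x00 as "seconds" is assumed here. */
	unsigned char secs = ts72xx_rtc_readbyte(0x00);
	ts72xx_rtc_writebyte(secs, 0x00);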
diff --git a/arch/arm/mach-imx/irq.c b/arch/arm/mach-imx/irq.c
index eeb8a6d4a399..a5de5f1da9f2 100644
--- a/arch/arm/mach-imx/irq.c
+++ b/arch/arm/mach-imx/irq.c
@@ -127,7 +127,7 @@ static void
imx_gpio_ack_irq(unsigned int irq)
{
DEBUG_IRQ("%s: irq %d\n", __FUNCTION__, irq);
- ISR(IRQ_TO_REG(irq)) |= 1 << ((irq - IRQ_GPIOA(0)) % 32);
+ ISR(IRQ_TO_REG(irq)) = 1 << ((irq - IRQ_GPIOA(0)) % 32);
}
static void
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index a0724f2b24ce..9f55f5ae1044 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -232,8 +232,6 @@ static void __init intcp_init_irq(void)
for (i = IRQ_PIC_START; i <= IRQ_PIC_END; i++) {
if (i == 11)
i = 22;
- if (i == IRQ_CP_CPPLDINT)
- i++;
if (i == 29)
break;
set_irq_chip(i, &pic_chip);
@@ -259,8 +257,7 @@ static void __init intcp_init_irq(void)
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
- set_irq_handler(IRQ_CP_CPPLDINT, sic_handle_irq);
- pic_unmask_irq(IRQ_CP_CPPLDINT);
+ set_irq_chained_handler(IRQ_CP_CPPLDINT, sic_handle_irq);
}
/*
diff --git a/arch/arm/mach-ixp23xx/core.c b/arch/arm/mach-ixp23xx/core.c
index 092ee12ced42..affd1d5d7440 100644
--- a/arch/arm/mach-ixp23xx/core.c
+++ b/arch/arm/mach-ixp23xx/core.c
@@ -178,8 +178,12 @@ static int ixp23xx_irq_set_type(unsigned int irq, unsigned int type)
static void ixp23xx_irq_mask(unsigned int irq)
{
- volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
+ volatile unsigned long *intr_reg;
+ if (irq >= 56)
+ irq += 8;
+
+ intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
*intr_reg &= ~(1 << (irq % 32));
}
@@ -199,17 +203,25 @@ static void ixp23xx_irq_ack(unsigned int irq)
*/
static void ixp23xx_irq_level_unmask(unsigned int irq)
{
- volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
+ volatile unsigned long *intr_reg;
ixp23xx_irq_ack(irq);
+ if (irq >= 56)
+ irq += 8;
+
+ intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
*intr_reg |= (1 << (irq % 32));
}
static void ixp23xx_irq_edge_unmask(unsigned int irq)
{
- volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
+ volatile unsigned long *intr_reg;
+
+ if (irq >= 56)
+ irq += 8;
+ intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
*intr_reg |= (1 << (irq % 32));
}
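The three helpers changed above repeat the same address arithmetic, which the new interrupt-number fix-up precedes: sources numbered 56 and above sit past an 8-entry hole in the register map, so the number is bumped by 8 before irq / 32 selects the enable word and irq % 32 selects the bit. A condensed sketch of that calculation (the helper name is invented, the constants come from the hunks):

	/* Sketch only: condenses the register selection shared by the
	 * mask/unmask helpers above. */
	static volatile unsigned long *ixp23xx_intr_en_reg(unsigned int irq)
	{
		if (irq >= 56)
			irq += 8;		/* skip the hole in the register map */

		return IXP23XX_INTR_EN1 + (irq / 32);	/* one word per 32 sources */
	}

	/* e.g. IRQ 56 becomes offset 64: the third word of the bank, bit 64 % 32 = 0 */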
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 2a39f9e481ad..3b23f43cb160 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -141,7 +141,7 @@ config IXP4XX_INDIRECT_PCI
2) If > 64MB of memory space is required, the IXP4xx can be
configured to use indirect registers to access PCI This allows
for up to 128MB (0x48000000 to 0x4fffffff) of memory on the bus.
- The disadvantadge of this is that every PCI access requires
+ The disadvantage of this is that every PCI access requires
three local register accesses plus a spinlock, but in some
cases the performance hit is acceptable. In addition, you cannot
mmap() PCI devices in this case due to the indirect nature
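The help text explains why indirect mode is slower: each PCI access is turned into a short sequence of local register accesses performed under a spinlock. A minimal sketch of such an indirect read is shown below; the register names, lock and window layout are invented purely to illustrate the cost, they are not the real ixp4xx implementation.

	static DEFINE_SPINLOCK(indirect_lock);

	static u32 indirect_pci_read(void __iomem *addr_reg, void __iomem *data_reg,
				     u32 pci_addr)
	{
		unsigned long flags;
		u32 val;

		spin_lock_irqsave(&indirect_lock, flags);
		__raw_writel(pci_addr, addr_reg);	/* select the PCI address  */
		val = __raw_readl(data_reg);		/* read through the window */
		spin_unlock_irqrestore(&indirect_lock, flags);

		return val;
	}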
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index 02e188d98e7d..b307f11951df 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -493,6 +493,7 @@ static void __init mainstone_map_io(void)
MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)")
/* Maintainer: MontaVista Software Inc. */
.phys_io = 0x40000000,
+ .boot_params = 0xa0000100, /* BLOB boot parameter setting */
.io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
.map_io = mainstone_map_io,
.init_irq = mainstone_init_irq,
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 19b372df544a..44bcb8097c7a 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -371,6 +371,7 @@ static int spitz_ohci_init(struct device *dev)
static struct pxaohci_platform_data spitz_ohci_platform_data = {
.port_mode = PMM_NPS_MODE,
.init = spitz_ohci_init,
+ .power_budget = 150,
};
diff --git a/arch/arm/mach-s3c2410/Kconfig b/arch/arm/mach-s3c2410/Kconfig
index ce7d81000695..970f98dadffc 100644
--- a/arch/arm/mach-s3c2410/Kconfig
+++ b/arch/arm/mach-s3c2410/Kconfig
@@ -170,7 +170,7 @@ config S3C2410_PM_DEBUG
depends on ARCH_S3C2410 && PM
help
Say Y here if you want verbose debugging from the PM Suspend and
- Resume code. See `Documentation/arm/Samsing-S3C24XX/Suspend.txt`
+ Resume code. See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt>
for more information.
config S3C2410_PM_CHECK
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 9e02bc3712a0..af6d2775cf82 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -59,6 +59,14 @@ neponset_irq_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *reg
if (irr & (IRR_ETHERNET | IRR_USAR)) {
desc->chip->mask(irq);
+ /*
+ * Ack the interrupt now to prevent re-entering
+ * this neponset handler. Again, this is safe
+ * since we'll check the IRR register prior to
+ * leaving.
+ */
+ desc->chip->ack(irq);
+
if (irr & IRR_ETHERNET) {
d = irq_desc + IRQ_NEPONSET_SMC9196;
desc_handle_irq(IRQ_NEPONSET_SMC9196, d, regs);
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 799697d32dec..cebd48a3dae4 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -112,10 +112,9 @@ void __init versatile_init_irq(void)
{
unsigned int i;
- vic_init(VA_VIC_BASE, IRQ_VIC_START, ~(1 << 31));
+ vic_init(VA_VIC_BASE, IRQ_VIC_START, ~0);
- set_irq_handler(IRQ_VICSOURCE31, sic_handle_irq);
- enable_irq(IRQ_VICSOURCE31);
+ set_irq_chained_handler(IRQ_VICSOURCE31, sic_handle_irq);
/* Do second interrupt controller */
writel(~0, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR);
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index f14b2d0f3690..95273de4f772 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -376,7 +376,7 @@ void __init build_mem_type_table(void)
ecc_mask = 0;
}
- if (cpu_arch <= CPU_ARCH_ARMv5TEJ) {
+ if (cpu_arch <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) {
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
if (mem_types[i].prot_l1)
mem_types[i].prot_l1 |= PMD_BIT4;
@@ -631,7 +631,7 @@ void setup_mm_for_reboot(char mode)
pgd = init_mm.pgd;
base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
- if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ)
+ if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
base_pmdval |= PMD_BIT4;
for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 80873b36c3f7..8d32e21fe151 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -427,12 +427,13 @@ __xsc3_setup:
#endif
mcr p15, 0, r0, c1, c0, 1 @ set auxiliary control reg
mrc p15, 0, r0, c1, c0, 0 @ get control register
- bic r0, r0, #0x0200 @ .... ..R. .... ....
bic r0, r0, #0x0002 @ .... .... .... ..A.
orr r0, r0, #0x0005 @ .... .... .... .C.M
#if BTB_ENABLE
+ bic r0, r0, #0x0200 @ .... ..R. .... ....
orr r0, r0, #0x3900 @ ..VI Z..S .... ....
#else
+ bic r0, r0, #0x0a00 @ .... Z.R. .... ....
orr r0, r0, #0x3100 @ ..VI ...S .... ....
#endif
#if L2_CACHE_ENABLE
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index daee69579b1c..40e5aba3ad3d 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -1066,14 +1066,6 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
},
},
- {
- .callback = disable_acpi_pci,
- .ident = "HP xw9300",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP xw9300 Workstation"),
- },
- },
{}
};
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index 2e3b643a4dc4..1649a175a206 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -5,17 +5,34 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/acpi.h>
+
#include <asm/pci-direct.h>
#include <asm/acpi.h>
#include <asm/apic.h>
+#ifdef CONFIG_ACPI
+
+static int nvidia_hpet_detected __initdata;
+
+static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+{
+ nvidia_hpet_detected = 1;
+ return 0;
+}
+#endif
+
static int __init check_bridge(int vendor, int device)
{
#ifdef CONFIG_ACPI
- /* According to Nvidia all timer overrides are bogus. Just ignore
- them all. */
+ /* According to Nvidia all timer overrides are bogus unless HPET
+ is enabled. */
if (vendor == PCI_VENDOR_ID_NVIDIA) {
- acpi_skip_timer_override = 1;
+ nvidia_hpet_detected = 0;
+ acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
+ if (nvidia_hpet_detected == 0) {
+ acpi_skip_timer_override = 1;
+ }
}
#endif
if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 846e1639ef7c..dd6b0e3386ce 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1547,15 +1547,18 @@ void __init setup_arch(char **cmdline_p)
if (efi_enabled)
efi_map_memmap();
-#ifdef CONFIG_X86_IO_APIC
- check_acpi_pci(); /* Checks more than just ACPI actually */
-#endif
-
#ifdef CONFIG_ACPI
/*
* Parse the ACPI tables for possible boot-time SMP configuration.
*/
acpi_boot_table_init();
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+ check_acpi_pci(); /* Checks more than just ACPI actually */
+#endif
+
+#ifdef CONFIG_ACPI
acpi_boot_init();
#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
diff --git a/arch/i386/mach-generic/probe.c b/arch/i386/mach-generic/probe.c
index cea5b3ce4b57..d55fa7b187ab 100644
--- a/arch/i386/mach-generic/probe.c
+++ b/arch/i386/mach-generic/probe.c
@@ -93,9 +93,11 @@ int __init mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid
int i;
for (i = 0; apic_probe[i]; ++i) {
if (apic_probe[i]->mps_oem_check(mpc,oem,productid)) {
- genapic = apic_probe[i];
- printk(KERN_INFO "Switched to APIC driver `%s'.\n",
- genapic->name);
+ if (!cmdline_apic) {
+ genapic = apic_probe[i];
+ printk(KERN_INFO "Switched to APIC driver `%s'.\n",
+ genapic->name);
+ }
return 1;
}
}
@@ -107,9 +109,11 @@ int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
int i;
for (i = 0; apic_probe[i]; ++i) {
if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
- genapic = apic_probe[i];
- printk(KERN_INFO "Switched to APIC driver `%s'.\n",
- genapic->name);
+ if (!cmdline_apic) {
+ genapic = apic_probe[i];
+ printk(KERN_INFO "Switched to APIC driver `%s'.\n",
+ genapic->name);
+ }
return 1;
}
}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ee5fbb02b28f..e8ff09fe73d9 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -13,7 +13,7 @@ choice
default SGI_IP22
config MIPS_MTX1
- bool "Support for 4G Systems MTX-1 board"
+ bool "4G Systems MTX-1 board"
select DMA_NONCOHERENT
select HW_HAS_PCI
select SOC_AU1500
@@ -120,7 +120,7 @@ config MIPS_MIRAGE
select SYS_SUPPORTS_LITTLE_ENDIAN
config MIPS_COBALT
- bool "Support for Cobalt Server"
+ bool "Cobalt Server"
select DMA_NONCOHERENT
select HW_HAS_PCI
select I8259
@@ -132,7 +132,7 @@ config MIPS_COBALT
select SYS_SUPPORTS_LITTLE_ENDIAN
config MACH_DECSTATION
- bool "Support for DECstations"
+ bool "DECstations"
select BOOT_ELF32
select DMA_NONCOHERENT
select EARLY_PRINTK
@@ -158,7 +158,7 @@ config MACH_DECSTATION
otherwise choose R3000.
config MIPS_EV64120
- bool "Support for Galileo EV64120 Evaluation board (EXPERIMENTAL)"
+ bool "Galileo EV64120 Evaluation board (EXPERIMENTAL)"
depends on EXPERIMENTAL
select DMA_NONCOHERENT
select HW_HAS_PCI
@@ -175,7 +175,7 @@ config MIPS_EV64120
kernel for this platform.
config MIPS_EV96100
- bool "Support for Galileo EV96100 Evaluation board (EXPERIMENTAL)"
+ bool "Galileo EV96100 Evaluation board (EXPERIMENTAL)"
depends on EXPERIMENTAL
select DMA_NONCOHERENT
select HW_HAS_PCI
@@ -195,7 +195,7 @@ config MIPS_EV96100
here if you wish to build a kernel for this platform.
config MIPS_IVR
- bool "Support for Globespan IVR board"
+ bool "Globespan IVR board"
select DMA_NONCOHERENT
select HW_HAS_PCI
select ITE_BOARD_GEN
@@ -211,7 +211,7 @@ config MIPS_IVR
build a kernel for this platform.
config MIPS_ITE8172
- bool "Support for ITE 8172G board"
+ bool "ITE 8172G board"
select DMA_NONCOHERENT
select HW_HAS_PCI
select ITE_BOARD_GEN
@@ -228,7 +228,7 @@ config MIPS_ITE8172
a kernel for this platform.
config MACH_JAZZ
- bool "Support for the Jazz family of machines"
+ bool "Jazz family of machines"
select ARC
select ARC32
select ARCH_MAY_HAVE_PC_FDC
@@ -246,7 +246,7 @@ config MACH_JAZZ
Olivetti M700-10 workstations.
config LASAT
- bool "Support for LASAT Networks platforms"
+ bool "LASAT Networks platforms"
select DMA_NONCOHERENT
select HW_HAS_PCI
select MIPS_GT64120
@@ -258,7 +258,7 @@ config LASAT
select SYS_SUPPORTS_LITTLE_ENDIAN
config MIPS_ATLAS
- bool "Support for MIPS Atlas board"
+ bool "MIPS Atlas board"
select BOOT_ELF32
select DMA_NONCOHERENT
select IRQ_CPU
@@ -283,7 +283,7 @@ config MIPS_ATLAS
board.
config MIPS_MALTA
- bool "Support for MIPS Malta board"
+ bool "MIPS Malta board"
select ARCH_MAY_HAVE_PC_FDC
select BOOT_ELF32
select HAVE_STD_PC_SERIAL_PORT
@@ -311,7 +311,7 @@ config MIPS_MALTA
board.
config MIPS_SEAD
- bool "Support for MIPS SEAD board (EXPERIMENTAL)"
+ bool "MIPS SEAD board (EXPERIMENTAL)"
depends on EXPERIMENTAL
select IRQ_CPU
select DMA_NONCOHERENT
@@ -328,7 +328,7 @@ config MIPS_SEAD
board.
config MIPS_SIM
- bool 'Support for MIPS simulator (MIPSsim)'
+ bool 'MIPS simulator (MIPSsim)'
select DMA_NONCOHERENT
select IRQ_CPU
select SYS_HAS_CPU_MIPS32_R1
@@ -341,7 +341,7 @@ config MIPS_SIM
emulator.
config MOMENCO_JAGUAR_ATX
- bool "Support for Momentum Jaguar board"
+ bool "Momentum Jaguar board"
select BOOT_ELF32
select DMA_NONCOHERENT
select HW_HAS_PCI
@@ -361,7 +361,7 @@ config MOMENCO_JAGUAR_ATX
Momentum Computer <http://www.momenco.com/>.
config MOMENCO_OCELOT
- bool "Support for Momentum Ocelot board"
+ bool "Momentum Ocelot board"
select DMA_NONCOHERENT
select HW_HAS_PCI
select IRQ_CPU
@@ -378,7 +378,7 @@ config MOMENCO_OCELOT
Momentum Computer <http://www.momenco.com/>.
config MOMENCO_OCELOT_3
- bool "Support for Momentum Ocelot-3 board"
+ bool "Momentum Ocelot-3 board"
select BOOT_ELF32
select DMA_NONCOHERENT
select HW_HAS_PCI
@@ -397,7 +397,7 @@ config MOMENCO_OCELOT_3
PMC-Sierra Rm79000 core.
config MOMENCO_OCELOT_C
- bool "Support for Momentum Ocelot-C board"
+ bool "Momentum Ocelot-C board"
select DMA_NONCOHERENT
select HW_HAS_PCI
select IRQ_CPU
@@ -414,7 +414,7 @@ config MOMENCO_OCELOT_C
Momentum Computer <http://www.momenco.com/>.
config MOMENCO_OCELOT_G
- bool "Support for Momentum Ocelot-G board"
+ bool "Momentum Ocelot-G board"
select DMA_NONCOHERENT
select HW_HAS_PCI
select IRQ_CPU
@@ -431,23 +431,23 @@ config MOMENCO_OCELOT_G
Momentum Computer <http://www.momenco.com/>.
config MIPS_XXS1500
- bool "Support for MyCable XXS1500 board"
+ bool "MyCable XXS1500 board"
select DMA_NONCOHERENT
select SOC_AU1500
select SYS_SUPPORTS_LITTLE_ENDIAN
config PNX8550_V2PCI
- bool "Support for Philips PNX8550 based Viper2-PCI board"
+ bool "Philips PNX8550 based Viper2-PCI board"
select PNX8550
select SYS_SUPPORTS_LITTLE_ENDIAN
config PNX8550_JBS
- bool "Support for Philips PNX8550 based JBS board"
+ bool "Philips PNX8550 based JBS board"
select PNX8550
select SYS_SUPPORTS_LITTLE_ENDIAN
config DDB5074
- bool "Support for NEC DDB Vrc-5074 (EXPERIMENTAL)"
+ bool "NEC DDB Vrc-5074 (EXPERIMENTAL)"
depends on EXPERIMENTAL
select DDB5XXX_COMMON
select DMA_NONCOHERENT
@@ -465,7 +465,7 @@ config DDB5074
evaluation board.
config DDB5476
- bool "Support for NEC DDB Vrc-5476"
+ bool "NEC DDB Vrc-5476"
select DDB5XXX_COMMON
select DMA_NONCOHERENT
select HAVE_STD_PC_SERIAL_PORT
@@ -486,7 +486,7 @@ config DDB5476
IDE controller, PS2 keyboard, PS2 mouse, etc.
config DDB5477
- bool "Support for NEC DDB Vrc-5477"
+ bool "NEC DDB Vrc-5477"
select DDB5XXX_COMMON
select DMA_NONCOHERENT
select HW_HAS_PCI
@@ -504,13 +504,13 @@ config DDB5477
ether port USB, AC97, PCI, etc.
config MACH_VR41XX
- bool "Support for NEC VR4100 series based machines"
+ bool "NEC VR41XX-based machines"
select SYS_HAS_CPU_VR41XX
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL
config PMC_YOSEMITE
- bool "Support for PMC-Sierra Yosemite eval board"
+ bool "PMC-Sierra Yosemite eval board"
select DMA_COHERENT
select HW_HAS_PCI
select IRQ_CPU
@@ -527,7 +527,7 @@ config PMC_YOSEMITE
manufactured by PMC-Sierra.
config QEMU
- bool "Support for Qemu"
+ bool "Qemu"
select DMA_COHERENT
select GENERIC_ISA_DMA
select HAVE_STD_PC_SERIAL_PORT
@@ -547,7 +547,7 @@ config QEMU
can be found at http://www.linux-mips.org/wiki/Qemu.
config SGI_IP22
- bool "Support for SGI IP22 (Indy/Indigo2)"
+ bool "SGI IP22 (Indy/Indigo2)"
select ARC
select ARC32
select BOOT_ELF32
@@ -567,7 +567,7 @@ config SGI_IP22
that runs on these, say Y here.
config SGI_IP27
- bool "Support for SGI IP27 (Origin200/2000)"
+ bool "SGI IP27 (Origin200/2000)"
select ARC
select ARC64
select BOOT_ELF64
@@ -583,7 +583,7 @@ config SGI_IP27
here.
config SGI_IP32
- bool "Support for SGI IP32 (O2) (EXPERIMENTAL)"
+ bool "SGI IP32 (O2) (EXPERIMENTAL)"
depends on EXPERIMENTAL
select ARC
select ARC32
@@ -604,7 +604,7 @@ config SGI_IP32
If you want this kernel to run on SGI O2 workstation, say Y here.
config SIBYTE_BIGSUR
- bool "Support for Sibyte BCM91480B-BigSur"
+ bool "Sibyte BCM91480B-BigSur"
select BOOT_ELF32
select DMA_COHERENT
select PCI_DOMAINS
@@ -615,7 +615,7 @@ config SIBYTE_BIGSUR
select SYS_SUPPORTS_LITTLE_ENDIAN
config SIBYTE_SWARM
- bool "Support for Sibyte BCM91250A-SWARM"
+ bool "Sibyte BCM91250A-SWARM"
select BOOT_ELF32
select DMA_COHERENT
select SIBYTE_SB1250
@@ -626,7 +626,7 @@ config SIBYTE_SWARM
select SYS_SUPPORTS_LITTLE_ENDIAN
config SIBYTE_SENTOSA
- bool "Support for Sibyte BCM91250E-Sentosa"
+ bool "Sibyte BCM91250E-Sentosa"
depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
@@ -637,7 +637,7 @@ config SIBYTE_SENTOSA
select SYS_SUPPORTS_LITTLE_ENDIAN
config SIBYTE_RHONE
- bool "Support for Sibyte BCM91125E-Rhone"
+ bool "Sibyte BCM91125E-Rhone"
depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
@@ -648,7 +648,7 @@ config SIBYTE_RHONE
select SYS_SUPPORTS_LITTLE_ENDIAN
config SIBYTE_CARMEL
- bool "Support for Sibyte BCM91120x-Carmel"
+ bool "Sibyte BCM91120x-Carmel"
depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
@@ -659,7 +659,7 @@ config SIBYTE_CARMEL
select SYS_SUPPORTS_LITTLE_ENDIAN
config SIBYTE_PTSWARM
- bool "Support for Sibyte BCM91250PT-PTSWARM"
+ bool "Sibyte BCM91250PT-PTSWARM"
depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
@@ -671,7 +671,7 @@ config SIBYTE_PTSWARM
select SYS_SUPPORTS_LITTLE_ENDIAN
config SIBYTE_LITTLESUR
- bool "Support for Sibyte BCM91250C2-LittleSur"
+ bool "Sibyte BCM91250C2-LittleSur"
depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
@@ -683,7 +683,7 @@ config SIBYTE_LITTLESUR
select SYS_SUPPORTS_LITTLE_ENDIAN
config SIBYTE_CRHINE
- bool "Support for Sibyte BCM91120C-CRhine"
+ bool "Sibyte BCM91120C-CRhine"
depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
@@ -694,7 +694,7 @@ config SIBYTE_CRHINE
select SYS_SUPPORTS_LITTLE_ENDIAN
config SIBYTE_CRHONE
- bool "Support for Sibyte BCM91125C-CRhone"
+ bool "Sibyte BCM91125C-CRhone"
depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
@@ -706,7 +706,7 @@ config SIBYTE_CRHONE
select SYS_SUPPORTS_LITTLE_ENDIAN
config SNI_RM200_PCI
- bool "Support for SNI RM200 PCI"
+ bool "SNI RM200 PCI"
select ARC
select ARC32
select ARCH_MAY_HAVE_PC_FDC
@@ -732,7 +732,7 @@ config SNI_RM200_PCI
support this machine type.
config TOSHIBA_JMR3927
- bool "Support for Toshiba JMR-TX3927 board"
+ bool "Toshiba JMR-TX3927 board"
select DMA_NONCOHERENT
select HW_HAS_PCI
select MIPS_TX3927
@@ -743,7 +743,7 @@ config TOSHIBA_JMR3927
select TOSHIBA_BOARDS
config TOSHIBA_RBTX4927
- bool "Support for Toshiba TBTX49[23]7 board"
+ bool "Toshiba TBTX49[23]7 board"
select DMA_NONCOHERENT
select HAS_TXX9_SERIAL
select HW_HAS_PCI
@@ -760,7 +760,7 @@ config TOSHIBA_RBTX4927
support this machine type
config TOSHIBA_RBTX4938
- bool "Support for Toshiba RBTX4938 board"
+ bool "Toshiba RBTX4938 board"
select HAVE_STD_PC_SERIAL_PORT
select DMA_NONCOHERENT
select GENERIC_ISA_DMA
@@ -1411,13 +1411,12 @@ config PAGE_SIZE_8KB
config PAGE_SIZE_16KB
bool "16kB"
- depends on EXPERIMENTAL && !CPU_R3000 && !CPU_TX39XX
+ depends on !CPU_R3000 && !CPU_TX39XX
help
Using 16kB page size will result in higher performance kernel at
the price of higher memory consumption. This option is available on
- all non-R3000 family processor. Not that at the time of this
- writing this option is still high experimental; there are also
- issues with compatibility of user applications.
+ all non-R3000 family processors. Note that you will need a suitable
+ Linux distribution to support this.
config PAGE_SIZE_64KB
bool "64kB"
@@ -1426,8 +1425,7 @@ config PAGE_SIZE_64KB
Using 64kB page size will result in higher performance kernel at
the price of higher memory consumption. This option is available on
all non-R3000 family processor. Not that at the time of this
- writing this option is still high experimental; there are also
- issues with compatibility of user applications.
+ writing this option is still high experimental.
endchoice
diff --git a/arch/mips/au1000/common/irq.c b/arch/mips/au1000/common/irq.c
index da61de776154..afe05ec12c27 100644
--- a/arch/mips/au1000/common/irq.c
+++ b/arch/mips/au1000/common/irq.c
@@ -68,6 +68,7 @@
extern void set_debug_traps(void);
extern irq_cpustat_t irq_stat [NR_CPUS];
+extern void mips_timer_interrupt(struct pt_regs *regs);
static void setup_local_irq(unsigned int irq, int type, int int_req);
static unsigned int startup_irq(unsigned int irq);
diff --git a/arch/mips/au1000/common/prom.c b/arch/mips/au1000/common/prom.c
index 9c171afd9a53..ae7d8c57bf3f 100644
--- a/arch/mips/au1000/common/prom.c
+++ b/arch/mips/au1000/common/prom.c
@@ -1,10 +1,9 @@
/*
*
* BRIEF MODULE DESCRIPTION
- * PROM library initialisation code, assuming a version of
- * pmon is the boot code.
+ * PROM library initialisation code, assuming YAMON is the boot loader.
*
- * Copyright 2000,2001 MontaVista Software Inc.
+ * Copyright 2000, 2001, 2006 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
* ppopov@mvista.com or source@mvista.com
*
@@ -49,9 +48,9 @@ extern char **prom_argv, **prom_envp;
typedef struct
{
- char *name;
-/* char *val; */
-}t_env_var;
+ char *name;
+ char *val;
+} t_env_var;
char * prom_getcmdline(void)
@@ -85,21 +84,16 @@ char *prom_getenv(char *envname)
{
/*
* Return a pointer to the given environment variable.
- * Environment variables are stored in the form of "memsize=64".
*/
t_env_var *env = (t_env_var *)prom_envp;
- int i;
-
- i = strlen(envname);
- while(env->name) {
- if(strncmp(envname, env->name, i) == 0) {
- return(env->name + strlen(envname) + 1);
- }
+ while (env->name) {
+ if (strcmp(envname, env->name) == 0)
+ return env->val;
env++;
}
- return(NULL);
+ return NULL;
}
inline unsigned char str2hexnum(unsigned char c)
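The rewritten prom_getenv() reflects a change in how the boot loader hands over its environment: instead of packed "name=value" strings it now receives an array of name/value pointer pairs terminated by a NULL name, so an exact strcmp() on the name is enough. A sketch of the assumed layout, with example values only:

	/* Illustrative only: a YAMON-style environment as an array of
	 * { name, value } pairs, terminated by a NULL name. */
	typedef struct {
		char *name;
		char *val;
	} t_env_var;

	static t_env_var example_envp[] = {
		{ "memsize", "0x04000000" },	/* example entries, not real firmware data */
		{ "ethaddr", "00:00:00:00:00:00" },
		{ NULL,      NULL },
	};

	/* prom_getenv("memsize") would walk the array and return "0x04000000". */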
diff --git a/arch/mips/au1000/common/sleeper.S b/arch/mips/au1000/common/sleeper.S
index 44dac3b0df3b..683d9da84b66 100644
--- a/arch/mips/au1000/common/sleeper.S
+++ b/arch/mips/au1000/common/sleeper.S
@@ -112,6 +112,11 @@ sdsleep:
mtc0 k0, CP0_PAGEMASK
lw k0, 0x14(sp)
mtc0 k0, CP0_CONFIG
+
+ /* We need to catch the early Alchemy SOCs with
+ * the write-only Config[OD] bit and set it back to one...
+ */
+ jal au1x00_fixup_config_od
lw $1, PT_R1(sp)
lw $2, PT_R2(sp)
lw $3, PT_R3(sp)
diff --git a/arch/mips/au1000/common/time.c b/arch/mips/au1000/common/time.c
index f85f1524b366..f74d66a58a21 100644
--- a/arch/mips/au1000/common/time.c
+++ b/arch/mips/au1000/common/time.c
@@ -116,6 +116,7 @@ void mips_timer_interrupt(struct pt_regs *regs)
null:
ack_r4ktimer(0);
+ irq_exit();
}
#ifdef CONFIG_PM
diff --git a/arch/mips/ddb5xxx/ddb5476/dbg_io.c b/arch/mips/ddb5xxx/ddb5476/dbg_io.c
index 85e9e5013679..f2296a999953 100644
--- a/arch/mips/ddb5xxx/ddb5476/dbg_io.c
+++ b/arch/mips/ddb5xxx/ddb5476/dbg_io.c
@@ -86,7 +86,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
/* disable interrupts */
UART16550_WRITE(OFS_INTR_ENABLE, 0);
- /* set up buad rate */
+ /* set up baud rate */
{
uint32 divisor;
diff --git a/arch/mips/ddb5xxx/ddb5477/kgdb_io.c b/arch/mips/ddb5xxx/ddb5477/kgdb_io.c
index 1d18d590495b..385bbdb10170 100644
--- a/arch/mips/ddb5xxx/ddb5477/kgdb_io.c
+++ b/arch/mips/ddb5xxx/ddb5477/kgdb_io.c
@@ -86,7 +86,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
/* disable interrupts */
UART16550_WRITE(OFS_INTR_ENABLE, 0);
- /* set up buad rate */
+ /* set up baud rate */
{
uint32 divisor;
diff --git a/arch/mips/gt64120/ev64120/serialGT.c b/arch/mips/gt64120/ev64120/serialGT.c
index 16e34a546e54..8f0d835491ff 100644
--- a/arch/mips/gt64120/ev64120/serialGT.c
+++ b/arch/mips/gt64120/ev64120/serialGT.c
@@ -149,7 +149,7 @@ void serial_set(int channel, unsigned long baud)
#else
/*
* Note: Set baud rate, hardcoded here for rate of 115200
- * since became unsure of above "buad rate" algorithm (??).
+ * since became unsure of above "baud rate" algorithm (??).
*/
outreg(channel, LCR, 0x83);
outreg(channel, DLM, 0x00); // See note above
diff --git a/arch/mips/gt64120/momenco_ocelot/dbg_io.c b/arch/mips/gt64120/momenco_ocelot/dbg_io.c
index 8720bccfdea2..f0a6a38fcf4d 100644
--- a/arch/mips/gt64120/momenco_ocelot/dbg_io.c
+++ b/arch/mips/gt64120/momenco_ocelot/dbg_io.c
@@ -73,7 +73,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
/* disable interrupts */
UART16550_WRITE(OFS_INTR_ENABLE, 0);
- /* set up buad rate */
+ /* set up baud rate */
{
uint32 divisor;
diff --git a/arch/mips/ite-boards/generic/dbg_io.c b/arch/mips/ite-boards/generic/dbg_io.c
index c4f8530fd07e..6a7ccaf93502 100644
--- a/arch/mips/ite-boards/generic/dbg_io.c
+++ b/arch/mips/ite-boards/generic/dbg_io.c
@@ -72,7 +72,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
/* disable interrupts */
UART16550_WRITE(OFS_INTR_ENABLE, 0);
- /* set up buad rate */
+ /* set up baud rate */
{
uint32 divisor;
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 92b28b674d6f..0facfaf4e950 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -272,8 +272,8 @@ void output_sc_defines(void)
text("/* Linux sigcontext offsets. */");
offset("#define SC_REGS ", struct sigcontext, sc_regs);
offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs);
- offset("#define SC_MDHI ", struct sigcontext, sc_hi);
- offset("#define SC_MDLO ", struct sigcontext, sc_lo);
+ offset("#define SC_MDHI ", struct sigcontext, sc_mdhi);
+ offset("#define SC_MDLO ", struct sigcontext, sc_mdlo);
offset("#define SC_PC ", struct sigcontext, sc_pc);
offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr);
linefeed;
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 47a087b6c11b..d268827c62bd 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -206,7 +206,7 @@ static inline void check_daddi(void)
"daddi %0, %1, %3\n\t"
".set pop"
: "=r" (v), "=&r" (tmp)
- : "I" (0xffffffffffffdb9a), "I" (0x1234));
+ : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
set_except_vector(12, handler);
local_irq_restore(flags);
@@ -224,7 +224,7 @@ static inline void check_daddi(void)
"dsrl %1, %1, 1\n\t"
"daddi %0, %1, %3"
: "=r" (v), "=&r" (tmp)
- : "I" (0xffffffffffffdb9a), "I" (0x1234));
+ : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
set_except_vector(12, handler);
local_irq_restore(flags);
@@ -280,7 +280,7 @@ static inline void check_daddiu(void)
"daddu %1, %2\n\t"
".set pop"
: "=&r" (v), "=&r" (w), "=&r" (tmp)
- : "I" (0xffffffffffffdb9a), "I" (0x1234));
+ : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
if (v == w) {
printk("no.\n");
@@ -296,7 +296,7 @@ static inline void check_daddiu(void)
"addiu %1, $0, %4\n\t"
"daddu %1, %2"
: "=&r" (v), "=&r" (w), "=&r" (tmp)
- : "I" (0xffffffffffffdb9a), "I" (0x1234));
+ : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
if (v == w) {
printk("yes.\n");
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 58b3b14873cb..8c2c359a05f4 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -121,6 +121,7 @@ static inline void check_wait(void)
case CPU_24K:
case CPU_25KF:
case CPU_34K:
+ case CPU_74K:
case CPU_PR4450:
cpu_wait = r4k_wait;
printk(" available.\n");
@@ -432,6 +433,15 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
+ case PRID_IMP_R14000:
+ c->cputype = CPU_R14000;
+ c->isa_level = MIPS_CPU_ISA_IV;
+ c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
+ MIPS_CPU_LLSC;
+ c->tlbsize = 64;
+ break;
}
}
@@ -593,6 +603,9 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c)
case PRID_IMP_34K:
c->cputype = CPU_34K;
break;
+ case PRID_IMP_74K:
+ c->cputype = CPU_74K;
+ break;
}
}
@@ -642,7 +655,7 @@ static inline void cpu_probe_sibyte(struct cpuinfo_mips *c)
case PRID_IMP_SB1:
c->cputype = CPU_SB1;
/* FPU in pass1 is known to have issues. */
- if ((c->processor_id & 0xff) < 0x20)
+ if ((c->processor_id & 0xff) < 0x02)
c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR);
break;
case PRID_IMP_SB1A:
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index d101d2fb24ca..a9c6de1b9542 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -101,7 +101,7 @@ FEXPORT(restore_all) # restore full frame
EMT
1:
mfc0 v1, CP0_TCSTATUS
- /* We set IXMT above, XOR should cler it here */
+ /* We set IXMT above, XOR should clear it here */
xori v1, v1, TCSTATUS_IXMT
or v1, v0, v1
mtc0 v1, CP0_TCSTATUS
diff --git a/arch/mips/kernel/gdb-low.S b/arch/mips/kernel/gdb-low.S
index 10f28fb9f008..5fd7a8af0c62 100644
--- a/arch/mips/kernel/gdb-low.S
+++ b/arch/mips/kernel/gdb-low.S
@@ -54,9 +54,11 @@
*/
mfc0 k0, CP0_CAUSE
andi k0, k0, 0x7c
- add k1, k1, k0
- PTR_L k0, saved_vectors(k1)
- jr k0
+#ifdef CONFIG_64BIT
+ dsll k0, k0, 1
+#endif
+ PTR_L k1, saved_vectors(k0)
+ jr k1
nop
1:
move k0, sp
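The change above is a scaling fix: (cause & 0x7c) is the CP0 Cause ExcCode field already multiplied by four, i.e. a byte offset into a table of 4-byte entries. saved_vectors holds pointers, so on a 64-bit kernel each slot is eight bytes and the offset has to be doubled (the new dsll) before it can index the table. The equivalent C is simply an array lookup:

	/* Sketch of what the assembly above computes; saved_vectors is the
	 * exception-handler table referenced in the hunk. */
	unsigned int exccode = (cause & 0x7c) >> 2;	/* CP0 Cause ExcCode       */
	void *handler = saved_vectors[exccode];		/* compiler scales the slot */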
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index e54a7f442f8a..d7bf0215bc1d 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -288,6 +288,9 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_MIPS_R_SYM(rel[i]);
if (!sym->st_value) {
+ /* Ignore unresolved weak symbol */
+ if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
+ continue;
printk(KERN_WARNING "%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
return -ENOENT;
@@ -325,6 +328,9 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_MIPS_R_SYM(rel[i]);
if (!sym->st_value) {
+ /* Ignore unresolved weak symbol */
+ if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
+ continue;
printk(KERN_WARNING "%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
return -ENOENT;
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 84ab959f924a..9def554f335b 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -42,6 +42,7 @@ static const char *cpu_name[] = {
[CPU_R8000] = "R8000",
[CPU_R10000] = "R10000",
[CPU_R12000] = "R12000",
+ [CPU_R14000] = "R14000",
[CPU_R4300] = "R4300",
[CPU_R4650] = "R4650",
[CPU_R4700] = "R4700",
@@ -74,6 +75,7 @@ static const char *cpu_name[] = {
[CPU_24K] = "MIPS 24K",
[CPU_25KF] = "MIPS 25Kf",
[CPU_34K] = "MIPS 34K",
+ [CPU_74K] = "MIPS 74K",
[CPU_VR4111] = "NEC VR4111",
[CPU_VR4121] = "NEC VR4121",
[CPU_VR4122] = "NEC VR4122",
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index b53a9207f530..8efb23a84131 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -209,7 +209,7 @@ sys_call_table:
PTR sys_fork
PTR sys_read
PTR sys_write
- PTR sys_open /* 4005 */
+ PTR compat_sys_open /* 4005 */
PTR sys_close
PTR sys_waitpid
PTR sys_creat
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index bcf1b10e518f..397a70e651b5 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -246,7 +246,7 @@ static inline int parse_rd_cmdline(unsigned long* rd_start, unsigned long* rd_en
#ifdef CONFIG_64BIT
/* HACK: Guess if the sign extension was forgotten */
if (start > 0x0000000080000000 && start < 0x00000000ffffffff)
- start |= 0xffffffff00000000;
+ start |= 0xffffffff00000000UL;
#endif
end = start + size;
@@ -355,8 +355,6 @@ static inline void bootmem_init(void)
}
#endif
- memory_present(0, first_usable_pfn, max_low_pfn);
-
/* Initialize the boot-time allocator with low memory only. */
bootmap_size = init_bootmem(first_usable_pfn, max_low_pfn);
@@ -410,6 +408,7 @@ static inline void bootmem_init(void)
/* Register lowmem ranges */
free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
+ memory_present(0, curr_pfn, curr_pfn + size - 1);
}
/* Reserve the bootmap memory. */
@@ -419,17 +418,20 @@ static inline void bootmem_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
initrd_below_start_ok = 1;
if (initrd_start) {
- unsigned long initrd_size = ((unsigned char *)initrd_end) - ((unsigned char *)initrd_start);
+ unsigned long initrd_size = ((unsigned char *)initrd_end) -
+ ((unsigned char *)initrd_start);
+ const int width = sizeof(long) * 2;
+
printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
(void *)initrd_start, initrd_size);
if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) {
printk("initrd extends beyond end of memory "
"(0x%0*Lx > 0x%0*Lx)\ndisabling initrd\n",
- sizeof(long) * 2,
- (unsigned long long)CPHYSADDR(initrd_end),
- sizeof(long) * 2,
- (unsigned long long)PFN_PHYS(max_low_pfn));
+ width,
+ (unsigned long long) CPHYSADDR(initrd_end),
+ width,
+ (unsigned long long) PFN_PHYS(max_low_pfn));
initrd_start = initrd_end = 0;
initrd_reserve_bootmem = 0;
}
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index 3ca786215d48..ce6cb915c0a7 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -31,7 +31,6 @@ setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
save_gp_reg(31);
#undef save_gp_reg
-#ifdef CONFIG_32BIT
err |= __put_user(regs->hi, &sc->sc_mdhi);
err |= __put_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
@@ -43,20 +42,6 @@ setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
err |= __put_user(mflo3(), &sc->sc_lo3);
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
}
-#endif
-#ifdef CONFIG_64BIT
- err |= __put_user(regs->hi, &sc->sc_hi[0]);
- err |= __put_user(regs->lo, &sc->sc_lo[0]);
- if (cpu_has_dsp) {
- err |= __put_user(mfhi1(), &sc->sc_hi[1]);
- err |= __put_user(mflo1(), &sc->sc_lo[1]);
- err |= __put_user(mfhi2(), &sc->sc_hi[2]);
- err |= __put_user(mflo2(), &sc->sc_lo[2]);
- err |= __put_user(mfhi3(), &sc->sc_hi[3]);
- err |= __put_user(mflo3(), &sc->sc_lo[3]);
- err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
- }
-#endif
err |= __put_user(!!used_math(), &sc->sc_used_math);
@@ -92,7 +77,6 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
current_thread_info()->restart_block.fn = do_no_restart_syscall;
err |= __get_user(regs->cp0_epc, &sc->sc_pc);
-#ifdef CONFIG_32BIT
err |= __get_user(regs->hi, &sc->sc_mdhi);
err |= __get_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
@@ -104,20 +88,6 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
}
-#endif
-#ifdef CONFIG_64BIT
- err |= __get_user(regs->hi, &sc->sc_hi[0]);
- err |= __get_user(regs->lo, &sc->sc_lo[0]);
- if (cpu_has_dsp) {
- err |= __get_user(treg, &sc->sc_hi[1]); mthi1(treg);
- err |= __get_user(treg, &sc->sc_lo[1]); mthi1(treg);
- err |= __get_user(treg, &sc->sc_hi[2]); mthi2(treg);
- err |= __get_user(treg, &sc->sc_lo[2]); mthi2(treg);
- err |= __get_user(treg, &sc->sc_hi[3]); mthi3(treg);
- err |= __get_user(treg, &sc->sc_lo[3]); mthi3(treg);
- err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
- }
-#endif
#define restore_gp_reg(i) do { \
err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index d42f358754ad..298f82fe8440 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -247,6 +247,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
current_thread_info()->cpu = 0;
smp_tune_scheduling();
plat_prepare_cpus(max_cpus);
+#ifndef CONFIG_HOTPLUG_CPU
+ cpu_present_map = cpu_possible_map;
+#endif
}
/* preload SMP state for boot cpu */
@@ -442,7 +445,7 @@ static int __init topology_init(void)
int cpu;
int ret;
- for_each_cpu(cpu) {
+ for_each_present_cpu(cpu) {
ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
if (ret)
printk(KERN_WARNING "topology_init: register_cpu %d "
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 2aeaa2fd4b32..5e8a18a8e2bd 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -276,31 +276,9 @@ void sys_set_thread_area(unsigned long addr)
asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
{
- int tmp, len;
- char __user *name;
+ int tmp;
switch(cmd) {
- case SETNAME: {
- char nodename[__NEW_UTS_LEN + 1];
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- name = (char __user *) arg1;
-
- len = strncpy_from_user(nodename, name, __NEW_UTS_LEN);
- if (len < 0)
- return -EFAULT;
-
- down_write(&uts_sem);
- strncpy(system_utsname.nodename, nodename, len);
- nodename[__NEW_UTS_LEN] = '\0';
- strlcpy(system_utsname.nodename, nodename,
- sizeof(system_utsname.nodename));
- up_write(&uts_sem);
- return 0;
- }
-
case MIPS_ATOMIC_SET:
printk(KERN_CRIT "How did I get here?\n");
return -EINVAL;
@@ -313,9 +291,6 @@ asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
case FLUSH_CACHE:
__flush_cache_all();
return 0;
-
- case MIPS_RDNVRAM:
- return -EIO;
}
return -EINVAL;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 4901f0a37fca..a7564b08eb4d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -819,15 +819,30 @@ asmlinkage void do_watch(struct pt_regs *regs)
asmlinkage void do_mcheck(struct pt_regs *regs)
{
+ const int field = 2 * sizeof(unsigned long);
+ int multi_match = regs->cp0_status & ST0_TS;
+
show_regs(regs);
- dump_tlb_all();
+
+ if (multi_match) {
+ printk("Index : %0x\n", read_c0_index());
+ printk("Pagemask: %0x\n", read_c0_pagemask());
+ printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
+ printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
+ printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
+ printk("\n");
+ dump_tlb_all();
+ }
+
+ show_code((unsigned int *) regs->cp0_epc);
+
/*
* Some chips may have other causes of machine check (e.g. SB1
* graduation timer)
*/
panic("Caught Machine Check exception - %scaused by multiple "
"matching entries in the TLB.",
- (regs->cp0_status & ST0_TS) ? "" : "not ");
+ (multi_match) ? "" : "not ");
}
asmlinkage void do_mt(struct pt_regs *regs)
@@ -902,6 +917,7 @@ static inline void parity_protection_init(void)
{
switch (current_cpu_data.cputype) {
case CPU_24K:
+ case CPU_34K:
case CPU_5KC:
write_c0_ecc(0x80000000);
back_to_back_c0_hazard();
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 14fa00e3cdfa..b84d1f9ce28e 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -151,23 +151,13 @@ SECTIONS
/* This is the MIPS specific mdebug section. */
.mdebug : { *(.mdebug) }
- /* These are needed for ELF backends which have not yet been
- converted to the new style linker. */
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- /* DWARF debug sections.
- Symbols in the .debug DWARF section are relative to the beginning of the
- section so we begin .debug at 0. It's not clear yet what needs to happen
- for the others. */
- .debug 0 : { *(.debug) }
- .debug_srcinfo 0 : { *(.debug_srcinfo) }
- .debug_aranges 0 : { *(.debug_aranges) }
- .debug_pubnames 0 : { *(.debug_pubnames) }
- .debug_sfnames 0 : { *(.debug_sfnames) }
- .line 0 : { *(.line) }
+
+ STABS_DEBUG
+
+ DWARF_DEBUG
+
/* These must appear regardless of . */
.gptab.sdata : { *(.gptab.data) *(.gptab.sdata) }
.gptab.sbss : { *(.gptab.bss) *(.gptab.sbss) }
- .comment : { *(.comment) }
.note : { *(.note) }
}
diff --git a/arch/mips/math-emu/dp_fint.c b/arch/mips/math-emu/dp_fint.c
index a1962eb460f8..39a71de16f47 100644
--- a/arch/mips/math-emu/dp_fint.c
+++ b/arch/mips/math-emu/dp_fint.c
@@ -29,7 +29,9 @@
ieee754dp ieee754dp_fint(int x)
{
- COMPXDP;
+ u64 xm;
+ int xe;
+ int xs;
CLEARCX;
diff --git a/arch/mips/math-emu/dp_flong.c b/arch/mips/math-emu/dp_flong.c
index eae90a866aa1..f08f223e488a 100644
--- a/arch/mips/math-emu/dp_flong.c
+++ b/arch/mips/math-emu/dp_flong.c
@@ -29,7 +29,9 @@
ieee754dp ieee754dp_flong(s64 x)
{
- COMPXDP;
+ u64 xm;
+ int xe;
+ int xs;
CLEARCX;
diff --git a/arch/mips/math-emu/sp_fint.c b/arch/mips/math-emu/sp_fint.c
index 7aac13afb09a..e88e125e01c2 100644
--- a/arch/mips/math-emu/sp_fint.c
+++ b/arch/mips/math-emu/sp_fint.c
@@ -29,7 +29,9 @@
ieee754sp ieee754sp_fint(int x)
{
- COMPXSP;
+ unsigned xm;
+ int xe;
+ int xs;
CLEARCX;
diff --git a/arch/mips/math-emu/sp_flong.c b/arch/mips/math-emu/sp_flong.c
index 3d6c1d11c178..26d6919a269a 100644
--- a/arch/mips/math-emu/sp_flong.c
+++ b/arch/mips/math-emu/sp_flong.c
@@ -29,7 +29,9 @@
ieee754sp ieee754sp_flong(s64 x)
{
- COMPXDP; /* <--- need 64-bit mantissa temp */
+ u64 xm; /* <--- need 64-bit mantissa temp */
+ int xe;
+ int xs;
CLEARCX;
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 4182e1176fae..4a43924cd4fc 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -29,6 +29,27 @@
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
+
+/*
+ * Special Variant of smp_call_function for use by cache functions:
+ *
+ * o No return value
+ * o collapses to normal function call on UP kernels
+ * o collapses to normal function call on systems with a single shared
+ * primary cache.
+ */
+static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
+ int retry, int wait)
+{
+ preempt_disable();
+
+#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
+ smp_call_function(func, info, retry, wait);
+#endif
+ func(info);
+ preempt_enable();
+}
+
/*
* Must die.
*/
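The comment spells out the contract of the new helper: it behaves like on_each_cpu() for the cache routines, but collapses to a plain local call, bracketed by preempt_disable()/preempt_enable(), on UP kernels and on the MT configurations where all thread contexts share one primary cache. The wrappers later in the file all use it in one pattern, sketched here with a stand-in function name:

	/* Sketch only: local_r4k_flush_example() stands in for the real
	 * local_r4k_* routines passed to r4k_on_each_cpu() below. */
	static void local_r4k_flush_example(void *args)
	{
		/* per-CPU cache maintenance goes here */
	}

	static void r4k_flush_example(void *args)
	{
		r4k_on_each_cpu(local_r4k_flush_example, args, 1, 1);
	}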
@@ -299,7 +320,7 @@ static void r4k_flush_cache_all(void)
if (!cpu_has_dc_aliases)
return;
- on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
}
static inline void local_r4k___flush_cache_all(void * args)
@@ -314,13 +335,14 @@ static inline void local_r4k___flush_cache_all(void * args)
case CPU_R4400MC:
case CPU_R10000:
case CPU_R12000:
+ case CPU_R14000:
r4k_blast_scache();
}
}
static void r4k___flush_cache_all(void)
{
- on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
+ r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}
static inline void local_r4k_flush_cache_range(void * args)
@@ -341,7 +363,7 @@ static inline void local_r4k_flush_cache_range(void * args)
static void r4k_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}
static inline void local_r4k_flush_cache_mm(void * args)
@@ -370,7 +392,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
if (!cpu_has_dc_aliases)
return;
- on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}
struct flush_cache_page_args {
@@ -461,7 +483,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
args.addr = addr;
args.pfn = pfn;
- on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}
static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -471,7 +493,7 @@ static inline void local_r4k_flush_data_cache_page(void * addr)
static void r4k_flush_data_cache_page(unsigned long addr)
{
- on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
+ r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}
struct flush_icache_range_args {
@@ -514,7 +536,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
args.start = start;
args.end = end;
- on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+ r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
instruction_hazard();
}
@@ -590,7 +612,7 @@ static void r4k_flush_icache_page(struct vm_area_struct *vma,
args.vma = vma;
args.page = page;
- on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
+ r4k_on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
}
@@ -689,7 +711,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
- on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}
static void r4k_flush_icache_all(void)
@@ -812,6 +834,7 @@ static void __init probe_pcache(void)
case CPU_R10000:
case CPU_R12000:
+ case CPU_R14000:
icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
c->icache.linesz = 64;
c->icache.ways = 2;
@@ -965,9 +988,11 @@ static void __init probe_pcache(void)
c->dcache.flags |= MIPS_CACHE_PINDEX;
case CPU_R10000:
case CPU_R12000:
+ case CPU_R14000:
case CPU_SB1:
break;
case CPU_24K:
+ case CPU_34K:
if (!(read_c0_config7() & (1 << 16)))
default:
if (c->dcache.waysize > PAGE_SIZE)
@@ -1091,6 +1116,7 @@ static void __init setup_scache(void)
case CPU_R10000:
case CPU_R12000:
+ case CPU_R14000:
scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
c->scache.linesz = 64 << ((config >> 13) & 1);
c->scache.ways = 2;
@@ -1135,6 +1161,31 @@ static void __init setup_scache(void)
c->options |= MIPS_CPU_SUBSET_CACHES;
}
+void au1x00_fixup_config_od(void)
+{
+ /*
+ * c0_config.od (bit 19) was write only (and read as 0)
+ * on the early revisions of Alchemy SOCs. It disables the bus
+ * transaction overlapping and needs to be set to fix various errata.
+ */
+ switch (read_c0_prid()) {
+ case 0x00030100: /* Au1000 DA */
+ case 0x00030201: /* Au1000 HA */
+ case 0x00030202: /* Au1000 HB */
+ case 0x01030200: /* Au1500 AB */
+ /*
+ * Au1100 errata actually keeps silence about this bit, so we set it
+ * just in case for those revisions that require it to be set according
+ * to arch/mips/au1000/common/cputable.c
+ */
+ case 0x02030200: /* Au1100 AB */
+ case 0x02030201: /* Au1100 BA */
+ case 0x02030202: /* Au1100 BC */
+ set_c0_config(1 << 19);
+ break;
+ }
+}
+
static inline void coherency_setup(void)
{
change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
@@ -1155,6 +1206,15 @@ static inline void coherency_setup(void)
case CPU_R4400MC:
clear_c0_config(CONF_CU);
break;
+ /*
+ * We need to catch the early Alchemy SOCs with
+ * the write-only c0_config.od bit and set it back to one...
+ */
+ case CPU_AU1000: /* rev. DA, HA, HB */
+ case CPU_AU1100: /* rev. AB, BA, BC ?? */
+ case CPU_AU1500: /* rev. AB */
+ au1x00_fixup_config_od();
+ break;
}
}
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index c22308b93ff0..33f6e1cdfd5b 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -227,7 +227,7 @@ void __init mem_init(void)
for (tmp = 0; tmp < max_low_pfn; tmp++)
if (page_is_ram(tmp)) {
ram++;
- if (PageReserved(mem_map+tmp))
+ if (PageReserved(pfn_to_page(tmp)))
reservedpages++;
}
diff --git a/arch/mips/mm/pg-r4k.c b/arch/mips/mm/pg-r4k.c
index e4390dc3eb48..b7c749232ffe 100644
--- a/arch/mips/mm/pg-r4k.c
+++ b/arch/mips/mm/pg-r4k.c
@@ -357,6 +357,7 @@ void __init build_clear_page(void)
case CPU_R10000:
case CPU_R12000:
+ case CPU_R14000:
pref_src_mode = Pref_LoadStreamed;
pref_dst_mode = Pref_StoreStreamed;
break;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 053dbacac56b..54507be2ab5b 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -875,6 +875,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l,
case CPU_R10000:
case CPU_R12000:
+ case CPU_R14000:
case CPU_4KC:
case CPU_SB1:
case CPU_SB1A:
@@ -906,6 +907,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l,
case CPU_4KEC:
case CPU_24K:
case CPU_34K:
+ case CPU_74K:
i_ehb(p);
tlbw(p);
break;
diff --git a/arch/mips/momentum/jaguar_atx/dbg_io.c b/arch/mips/momentum/jaguar_atx/dbg_io.c
index 542eac82b63c..d7dea0a136aa 100644
--- a/arch/mips/momentum/jaguar_atx/dbg_io.c
+++ b/arch/mips/momentum/jaguar_atx/dbg_io.c
@@ -73,7 +73,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
/* disable interrupts */
UART16550_WRITE(OFS_INTR_ENABLE, 0);
- /* set up buad rate */
+ /* set up baud rate */
{
uint32 divisor;
diff --git a/arch/mips/momentum/ocelot_c/dbg_io.c b/arch/mips/momentum/ocelot_c/dbg_io.c
index 8720bccfdea2..f0a6a38fcf4d 100644
--- a/arch/mips/momentum/ocelot_c/dbg_io.c
+++ b/arch/mips/momentum/ocelot_c/dbg_io.c
@@ -73,7 +73,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
/* disable interrupts */
UART16550_WRITE(OFS_INTR_ENABLE, 0);
- /* set up buad rate */
+ /* set up baud rate */
{
uint32 divisor;
diff --git a/arch/mips/momentum/ocelot_g/dbg_io.c b/arch/mips/momentum/ocelot_g/dbg_io.c
index 8720bccfdea2..f0a6a38fcf4d 100644
--- a/arch/mips/momentum/ocelot_g/dbg_io.c
+++ b/arch/mips/momentum/ocelot_g/dbg_io.c
@@ -73,7 +73,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
/* disable interrupts */
UART16550_WRITE(OFS_INTR_ENABLE, 0);
- /* set up buad rate */
+ /* set up baud rate */
{
uint32 divisor;
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index f2b4862aaae5..c31e4cff64e0 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -14,8 +14,8 @@
#include "op_impl.h"
-extern struct op_mips_model op_model_mipsxx __attribute__((weak));
-extern struct op_mips_model op_model_rm9000 __attribute__((weak));
+extern struct op_mips_model op_model_mipsxx_ops __attribute__((weak));
+extern struct op_mips_model op_model_rm9000_ops __attribute__((weak));
static struct op_mips_model *model;
@@ -80,13 +80,14 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
case CPU_24K:
case CPU_25KF:
case CPU_34K:
+ case CPU_74K:
case CPU_SB1:
case CPU_SB1A:
- lmodel = &op_model_mipsxx;
+ lmodel = &op_model_mipsxx_ops;
break;
case CPU_RM9000:
- lmodel = &op_model_rm9000;
+ lmodel = &op_model_rm9000_ops;
break;
};
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 95d488ca0754..f26a00e13204 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -23,7 +23,7 @@
#define M_COUNTER_OVERFLOW (1UL << 31)
-struct op_mips_model op_model_mipsxx;
+struct op_mips_model op_model_mipsxx_ops;
static struct mipsxx_register_config {
unsigned int control[4];
@@ -34,7 +34,7 @@ static struct mipsxx_register_config {
static void mipsxx_reg_setup(struct op_counter_config *ctr)
{
- unsigned int counters = op_model_mipsxx.num_counters;
+ unsigned int counters = op_model_mipsxx_ops.num_counters;
int i;
/* Compute the performance counter control word. */
@@ -62,7 +62,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
static void mipsxx_cpu_setup (void *args)
{
- unsigned int counters = op_model_mipsxx.num_counters;
+ unsigned int counters = op_model_mipsxx_ops.num_counters;
switch (counters) {
case 4:
@@ -83,7 +83,7 @@ static void mipsxx_cpu_setup (void *args)
/* Start all counters on current CPU */
static void mipsxx_cpu_start(void *args)
{
- unsigned int counters = op_model_mipsxx.num_counters;
+ unsigned int counters = op_model_mipsxx_ops.num_counters;
switch (counters) {
case 4:
@@ -100,7 +100,7 @@ static void mipsxx_cpu_start(void *args)
/* Stop all counters on current CPU */
static void mipsxx_cpu_stop(void *args)
{
- unsigned int counters = op_model_mipsxx.num_counters;
+ unsigned int counters = op_model_mipsxx_ops.num_counters;
switch (counters) {
case 4:
@@ -116,7 +116,7 @@ static void mipsxx_cpu_stop(void *args)
static int mipsxx_perfcount_handler(struct pt_regs *regs)
{
- unsigned int counters = op_model_mipsxx.num_counters;
+ unsigned int counters = op_model_mipsxx_ops.num_counters;
unsigned int control;
unsigned int counter;
int handled = 0;
@@ -187,33 +187,37 @@ static int __init mipsxx_init(void)
reset_counters(counters);
- op_model_mipsxx.num_counters = counters;
+ op_model_mipsxx_ops.num_counters = counters;
switch (current_cpu_data.cputype) {
case CPU_20KC:
- op_model_mipsxx.cpu_type = "mips/20K";
+ op_model_mipsxx_ops.cpu_type = "mips/20K";
break;
case CPU_24K:
- op_model_mipsxx.cpu_type = "mips/24K";
+ op_model_mipsxx_ops.cpu_type = "mips/24K";
break;
case CPU_25KF:
- op_model_mipsxx.cpu_type = "mips/25K";
+ op_model_mipsxx_ops.cpu_type = "mips/25K";
break;
#ifndef CONFIG_SMP
case CPU_34K:
- op_model_mipsxx.cpu_type = "mips/34K";
+ op_model_mipsxx_ops.cpu_type = "mips/34K";
+ break;
+
+ case CPU_74K:
+ op_model_mipsxx_ops.cpu_type = "mips/74K";
break;
#endif
case CPU_5KC:
- op_model_mipsxx.cpu_type = "mips/5K";
+ op_model_mipsxx_ops.cpu_type = "mips/5K";
break;
case CPU_SB1:
case CPU_SB1A:
- op_model_mipsxx.cpu_type = "mips/sb1";
+ op_model_mipsxx_ops.cpu_type = "mips/sb1";
break;
default:
@@ -229,12 +233,12 @@ static int __init mipsxx_init(void)
static void mipsxx_exit(void)
{
- reset_counters(op_model_mipsxx.num_counters);
+ reset_counters(op_model_mipsxx_ops.num_counters);
perf_irq = null_perf_irq;
}
-struct op_mips_model op_model_mipsxx = {
+struct op_mips_model op_model_mipsxx_ops = {
.reg_setup = mipsxx_reg_setup,
.cpu_setup = mipsxx_cpu_setup,
.init = mipsxx_init,
diff --git a/arch/mips/oprofile/op_model_rm9000.c b/arch/mips/oprofile/op_model_rm9000.c
index 9b75e41c78ef..b7063fefa65b 100644
--- a/arch/mips/oprofile/op_model_rm9000.c
+++ b/arch/mips/oprofile/op_model_rm9000.c
@@ -126,7 +126,7 @@ static void rm9000_exit(void)
free_irq(rm9000_perfcount_irq, NULL);
}
-struct op_mips_model op_model_rm9000 = {
+struct op_mips_model op_model_rm9000_ops = {
.reg_setup = rm9000_reg_setup,
.cpu_setup = rm9000_cpu_setup,
.init = rm9000_init,
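
The rename above pairs with the weak extern declarations added to common.c, so a model that is not compiled in simply resolves to a NULL address; the caller can then test the pointer (that check itself is outside this hunk). A minimal user-space sketch of the weak-symbol behaviour, with a made-up main() standing in for the oprofile glue:

    #include <stdio.h>

    struct op_mips_model {
        const char *cpu_type;
    };

    /* may or may not be provided by some other object file */
    extern struct op_mips_model op_model_mipsxx_ops __attribute__((weak));

    int main(void)
    {
        struct op_mips_model *model = &op_model_mipsxx_ops;

        if (!model)
            printf("mipsxx model not built in\n");
        else
            printf("using %s\n", model->cpu_type);
        return 0;
    }

Built on its own, the symbol stays undefined, the weak reference resolves to NULL and the first branch runs; linking in an object that defines op_model_mipsxx_ops flips it to the second.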
diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c
index de01c9815bdd..8ba08047d164 100644
--- a/arch/mips/sgi-ip32/ip32-irq.c
+++ b/arch/mips/sgi-ip32/ip32-irq.c
@@ -31,12 +31,12 @@
/* issue a PIO read to make sure no PIO writes are pending */
static void inline flush_crime_bus(void)
{
- volatile unsigned long junk = crime->control;
+ crime->control;
}
static void inline flush_mace_bus(void)
{
- volatile unsigned long junk = mace->perif.ctrl.misc;
+ mace->perif.ctrl.misc;
}
#undef DEBUG_IRQ
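
The change keeps the register read purely for its side effect; the dummy local was never needed, because the access through the volatile-qualified register block is what flushes the posted PIO writes. A compile-only sketch, assuming a volatile-qualified register struct along the lines of CRIME's/MACE's:

    struct crime_like_regs {
        volatile unsigned long control;
    };

    static struct crime_like_regs *crime_regs;  /* would point at ioremapped MMIO */

    static inline void flush_posted_writes_sketch(void)
    {
        (void)crime_regs->control;  /* the volatile read itself forces the bus access */
    }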
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 41e9ab40cd54..f70bd090dacd 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -822,6 +822,7 @@ static void __init prom_send_capabilities(void)
/* try calling the ibm,client-architecture-support method */
if (call_prom_ret("call-method", 3, 2, &ret,
ADDR("ibm,client-architecture-support"),
+ root,
ADDR(ibm_architecture_vec)) == 0) {
/* the call exists... */
if (ret)
@@ -1622,6 +1623,15 @@ static int __init prom_find_machine_type(void)
if (strstr(p, RELOC("Power Macintosh")) ||
strstr(p, RELOC("MacRISC")))
return PLATFORM_POWERMAC;
+#ifdef CONFIG_PPC64
+ /* We must make sure we don't detect the IBM Cell
+ * blades as pSeries due to some firmware issues,
+ * so we do it here.
+ */
+ if (strstr(p, RELOC("IBM,CBEA")) ||
+ strstr(p, RELOC("IBM,CPBW-1.0")))
+ return PLATFORM_GENERIC;
+#endif /* CONFIG_PPC64 */
i += sl + 1;
}
}
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 01e3c08cb550..8fdeca2d4597 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -803,10 +803,13 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
if (__get_user(cmcp, &ucp->uc_regs))
return -EFAULT;
mcp = (struct mcontext __user *)(u64)cmcp;
+ /* no need to check access_ok(mcp), since mcp < 4GB */
}
#else
if (__get_user(mcp, &ucp->uc_regs))
return -EFAULT;
+ if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
+ return -EFAULT;
#endif
restore_sigmask(&set);
if (restore_user_regs(regs, mcp, sig))
@@ -908,13 +911,14 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
{
struct sig_dbg_op op;
int i;
+ unsigned char tmp;
unsigned long new_msr = regs->msr;
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
unsigned long new_dbcr0 = current->thread.dbcr0;
#endif
for (i=0; i<ndbg; i++) {
- if (__copy_from_user(&op, dbg, sizeof(op)))
+ if (copy_from_user(&op, dbg + i, sizeof(op)))
return -EFAULT;
switch (op.dbg_type) {
case SIG_DBG_SINGLE_STEPPING:
@@ -959,6 +963,11 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
current->thread.dbcr0 = new_dbcr0;
#endif
+ if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
+ || __get_user(tmp, (u8 __user *) ctx)
+ || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
+ return -EFAULT;
+
/*
* If we get a fault copying the context into the kernel's
* image of the user's registers, we can't just return -EFAULT
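
The added checks probe the user-supplied context up front: access_ok() validates the range, and touching the first and last byte with __get_user() forces any fault to happen here, where -EFAULT can still be returned cleanly, rather than later while the registers are being restored. A self-contained sketch of the pattern, with stub stand-ins for access_ok()/__get_user() (the stub names and struct are illustrative, not the kernel API):

    #include <stddef.h>
    #include <stdio.h>

    struct ucontext_like { char bytes[128]; };

    /* illustrative stand-ins, not the kernel helpers */
    static int access_ok_stub(const void *p, size_t len)
    {
        (void)len;
        return p != NULL;
    }

    static int get_user_byte(unsigned char *dst, const unsigned char *src)
    {
        *dst = *src;    /* the real __get_user would fault-check this load */
        return 0;
    }

    static int validate_user_ctx(const struct ucontext_like *ctx)
    {
        unsigned char tmp;

        if (!access_ok_stub(ctx, sizeof(*ctx)))
            return -1;
        /* touch the first and last byte so any fault happens now, where it
         * can still be reported, not later in the middle of the restore */
        if (get_user_byte(&tmp, (const unsigned char *)ctx) ||
            get_user_byte(&tmp, (const unsigned char *)(ctx + 1) - 1))
            return -1;
        return 0;
    }

    int main(void)
    {
        struct ucontext_like ctx = { { 0 } };

        printf("valid: %d, NULL: %d\n",
               validate_user_ctx(&ctx), validate_user_ctx(NULL));
        return 0;
    }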
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 27f65b95184d..c2db642f4cdd 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -182,6 +182,8 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
if (err)
return err;
+ if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
+ return -EFAULT;
/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
if (v_regs != 0 && (msr & MSR_VEC) != 0)
err |= __copy_from_user(current->thread.vr, v_regs,
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 33654d1b1b43..994856e55b7c 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -52,7 +52,7 @@ static inline void __tlbie(unsigned long va, unsigned int psize)
default:
penc = mmu_psize_defs[psize].penc;
va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
- va |= (0x7f >> (8 - penc)) << 12;
+ va |= penc << 12;
asm volatile("tlbie %0,1" : : "r" (va) : "memory");
break;
}
@@ -74,7 +74,7 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
default:
penc = mmu_psize_defs[psize].penc;
va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
- va |= (0x7f >> (8 - penc)) << 12;
+ va |= penc << 12;
asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
: : "r"(va) : "memory");
break;
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 6574b22b3cf3..fd3e5609e3e0 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -125,14 +125,13 @@ static void __init cell_init_early(void)
static int __init cell_probe(void)
{
- /* XXX This is temporary, the Cell maintainer will come up with
- * more appropriate detection logic
- */
unsigned long root = of_get_flat_dt_root();
- if (!of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
- return 0;
- return 1;
+ if (of_flat_dt_is_compatible(root, "IBM,CBEA") ||
+ of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
+ return 1;
+
+ return 0;
}
/*
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index df2343e1956b..c896ce83d412 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -1157,6 +1157,7 @@ EXPORT_SYMBOL_GPL(pmac_i2c_xfer);
/* some quirks for platform function decoding */
enum {
pmac_i2c_quirk_invmask = 0x00000001u,
+ pmac_i2c_quirk_skip = 0x00000002u,
};
static void pmac_i2c_devscan(void (*callback)(struct device_node *dev,
@@ -1172,6 +1173,15 @@ static void pmac_i2c_devscan(void (*callback)(struct device_node *dev,
/* XXX Study device-tree's & apple drivers are get the quirks
* right !
*/
+ /* Workaround: running the clock-spreading platform functions on
+ * the eMac seems to cause lockups during boot, and the machine
+ * works fine without them. So for now, make sure i2c-hwclock
+ * doesn't match the "imic" clock chips; we'll figure out later
+ * whether they really need special handling.
+ */
+ { "i2c-hwclock", "imic5002", pmac_i2c_quirk_skip },
+ { "i2c-hwclock", "imic5003", pmac_i2c_quirk_skip },
{ "i2c-hwclock", NULL, pmac_i2c_quirk_invmask },
{ "i2c-cpu-voltage", NULL, 0},
{ "temp-monitor", NULL, 0 },
@@ -1198,6 +1208,8 @@ static void pmac_i2c_devscan(void (*callback)(struct device_node *dev,
if (p->compatible &&
!device_is_compatible(np, p->compatible))
continue;
+ if (p->quirks & pmac_i2c_quirk_skip)
+ break;
callback(np, p->quirks);
break;
}
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
index 4baa75b1d36f..f08173b0f065 100644
--- a/arch/powerpc/platforms/powermac/pfunc_core.c
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <asm/semaphore.h>
#include <asm/prom.h>
@@ -546,6 +547,7 @@ struct pmf_device {
static LIST_HEAD(pmf_devices);
static spinlock_t pmf_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_MUTEX(pmf_irq_mutex);
static void pmf_release_device(struct kref *kref)
{
@@ -864,15 +866,17 @@ int pmf_register_irq_client(struct device_node *target,
spin_lock_irqsave(&pmf_lock, flags);
func = __pmf_find_function(target, name, PMF_FLAGS_INT_GEN);
- if (func == NULL) {
- spin_unlock_irqrestore(&pmf_lock, flags);
+ if (func)
+ func = pmf_get_function(func);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+ if (func == NULL)
return -ENODEV;
- }
+ mutex_lock(&pmf_irq_mutex);
if (list_empty(&func->irq_clients))
func->dev->handlers->irq_enable(func);
list_add(&client->link, &func->irq_clients);
client->func = func;
- spin_unlock_irqrestore(&pmf_lock, flags);
+ mutex_unlock(&pmf_irq_mutex);
return 0;
}
@@ -881,16 +885,16 @@ EXPORT_SYMBOL_GPL(pmf_register_irq_client);
void pmf_unregister_irq_client(struct pmf_irq_client *client)
{
struct pmf_function *func = client->func;
- unsigned long flags;
BUG_ON(func == NULL);
- spin_lock_irqsave(&pmf_lock, flags);
+ mutex_lock(&pmf_irq_mutex);
client->func = NULL;
list_del(&client->link);
if (list_empty(&func->irq_clients))
func->dev->handlers->irq_disable(func);
- spin_unlock_irqrestore(&pmf_lock, flags);
+ mutex_unlock(&pmf_irq_mutex);
+ pmf_put_function(func);
}
EXPORT_SYMBOL_GPL(pmf_unregister_irq_client);
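
The conversion splits the locking: the function is looked up and pinned (pmf_get_function) under the pmf_lock spinlock, the spinlock is dropped, and the client-list update plus the irq_enable/irq_disable call are then serialized by the new pmf_irq_mutex, presumably because those handlers may sleep. A toy, single-threaded model of that ordering (stub lock/unlock helpers, invented names):

    #include <stdio.h>

    struct func { int refcount; int clients; };

    static struct func the_func = { 1, 0 };

    /* stand-ins for spin_lock_irqsave()/mutex_lock() and friends */
    static void lock(const char *which)   { printf("  lock %s\n", which); }
    static void unlock(const char *which) { printf("  unlock %s\n", which); }

    static int register_irq_client(void)
    {
        struct func *f;

        lock("pmf_lock (spinlock)");
        f = &the_func;          /* __pmf_find_function() in the real code */
        f->refcount++;          /* pin it before dropping the spinlock */
        unlock("pmf_lock (spinlock)");

        lock("pmf_irq_mutex");
        if (f->clients++ == 0)
            printf("  irq_enable()  /* may sleep, so not under the spinlock */\n");
        unlock("pmf_irq_mutex");
        return 0;
    }

    int main(void)
    {
        return register_irq_client();
    }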
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 5f79f01c44f2..3ba87835757e 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -389,6 +389,7 @@ static int __init pSeries_probe_hypertas(unsigned long node,
static int __init pSeries_probe(void)
{
+ unsigned long root = of_get_flat_dt_root();
char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
"device_type", NULL);
if (dtype == NULL)
@@ -396,6 +397,13 @@ static int __init pSeries_probe(void)
if (strcmp(dtype, "chrp"))
return 0;
+ /* Cell blade firmware claims to be chrp while it is not. Until this
+ * is fixed, we need to avoid matching those machines here.
+ */
+ if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0") ||
+ of_flat_dt_is_compatible(root, "IBM,CBEA"))
+ return 0;
+
DBG("pSeries detected, looking for LPAR capability...\n");
/* Now try to figure out if we are running on LPAR */
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index a93f5da6855d..40b42c88e6a7 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -69,6 +69,17 @@ void __init smp_store_cpu_info(int id)
"clock-frequency", 0);
cpu_data(id).prom_node = cpu_node;
cpu_data(id).mid = cpu_get_hwmid(cpu_node);
+
+ /* this is required to tune the scheduler correctly */
+ /* is it possible to have CPUs with different cache sizes? */
+ if (id == boot_cpu_id) {
+ int cache_line,cache_nlines;
+ cache_line = 0x20;
+ cache_line = prom_getintdefault(cpu_node, "ecache-line-size", cache_line);
+ cache_nlines = 0x8000;
+ cache_nlines = prom_getintdefault(cpu_node, "ecache-nlines", cache_nlines);
+ max_cache_size = cache_line * cache_nlines;
+ }
if (cpu_data(id).mid < 0)
panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
}
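
The boot-CPU branch above sizes max_cache_size as ecache-line-size * ecache-nlines, falling back to 0x20 and 0x8000 when the PROM properties are missing. A tiny sketch with a stub in place of prom_getintdefault():

    #include <stdio.h>

    /* stand-in: pretend the PROM property is absent and return the default */
    static int prom_getintdefault_stub(const char *prop, int def)
    {
        (void)prop;
        return def;
    }

    int main(void)
    {
        int line   = prom_getintdefault_stub("ecache-line-size", 0x20);
        int nlines = prom_getintdefault_stub("ecache-nlines", 0x8000);

        printf("max_cache_size = %d bytes\n", line * nlines);  /* 1 MiB */
        return 0;
    }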
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 3eadac5e171e..31c5892f5acc 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -10,6 +10,7 @@
#include <linux/config.h>
#include <linux/version.h>
#include <linux/errno.h>
+#include <linux/threads.h>
#include <asm/thread_info.h>
#include <asm/asi.h>
#include <asm/pstate.h>
@@ -493,6 +494,35 @@ tlb_fixup_done:
call prom_init
mov %l7, %o0 ! OpenPROM cif handler
+ /* Initialize current_thread_info()->cpu as early as possible.
+ * In order to do that accurately we have to patch up the get_cpuid()
+ * assembler sequences. And that, in turn, requires that we know
+ * if we are on a Starfire box or not. While we're here, patch up
+ * the sun4v sequences as well.
+ */
+ call check_if_starfire
+ nop
+ call per_cpu_patch
+ nop
+ call sun4v_patch
+ nop
+
+#ifdef CONFIG_SMP
+ call hard_smp_processor_id
+ nop
+ cmp %o0, NR_CPUS
+ blu,pt %xcc, 1f
+ nop
+ call boot_cpu_id_too_large
+ nop
+ /* Not reached... */
+
+1:
+#else
+ mov 0, %o0
+#endif
+ stb %o0, [%g6 + TI_CPU]
+
/* Off we go.... */
call start_kernel
nop
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 2b7a1f316a93..0c0895202970 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -599,18 +599,128 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = {
/* SUN4V PCI configuration space accessors. */
-static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
+struct pdev_entry {
+ struct pdev_entry *next;
+ u32 devhandle;
+ unsigned int bus;
+ unsigned int device;
+ unsigned int func;
+};
+
+#define PDEV_HTAB_SIZE 16
+#define PDEV_HTAB_MASK (PDEV_HTAB_SIZE - 1)
+static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];
+
+static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
- if (bus == pbm->pci_first_busno) {
- if (device == 0 && func == 0)
- return 0;
- return 1;
+ unsigned int val;
+
+ val = (devhandle ^ (devhandle >> 4));
+ val ^= bus;
+ val ^= device;
+ val ^= func;
+
+ return val & PDEV_HTAB_MASK;
+}
+
+static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
+{
+ struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
+ struct pdev_entry **slot;
+
+ if (!p)
+ return -ENOMEM;
+
+ slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
+ p->next = *slot;
+ *slot = p;
+
+ p->devhandle = devhandle;
+ p->bus = bus;
+ p->device = device;
+ p->func = func;
+
+ return 0;
+}
+
+/* Recursively descend into the OBP device tree, rooted at toplevel_node,
+ * looking for a PCI device matching bus and devfn.
+ */
+static int obp_find(struct linux_prom_pci_registers *pregs, int toplevel_node, unsigned int bus, unsigned int devfn)
+{
+ toplevel_node = prom_getchild(toplevel_node);
+
+ while (toplevel_node != 0) {
+ int ret = obp_find(pregs, toplevel_node, bus, devfn);
+
+ if (ret != 0)
+ return ret;
+
+ ret = prom_getproperty(toplevel_node, "reg", (char *) pregs,
+ sizeof(*pregs) * PROMREG_MAX);
+ if (ret == 0 || ret == -1)
+ goto next_sibling;
+
+ if (((pregs[0].phys_hi >> 16) & 0xff) == bus &&
+ ((pregs[0].phys_hi >> 8) & 0xff) == devfn)
+ break;
+
+ next_sibling:
+ toplevel_node = prom_getsibling(toplevel_node);
+ }
+
+ return toplevel_node;
+}
+
+static int pdev_htab_populate(struct pci_pbm_info *pbm)
+{
+ struct linux_prom_pci_registers pr[PROMREG_MAX];
+ u32 devhandle = pbm->devhandle;
+ unsigned int bus;
+
+ for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
+ unsigned int devfn;
+
+ for (devfn = 0; devfn < 256; devfn++) {
+ unsigned int device = PCI_SLOT(devfn);
+ unsigned int func = PCI_FUNC(devfn);
+
+ if (obp_find(pr, pbm->prom_node, bus, devfn)) {
+ int err = pdev_htab_add(devhandle, bus,
+ device, func);
+ if (err)
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
+{
+ struct pdev_entry *p;
+
+ p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
+ while (p) {
+ if (p->devhandle == devhandle &&
+ p->bus == bus &&
+ p->device == device &&
+ p->func == func)
+ break;
+
+ p = p->next;
}
+ return p;
+}
+
+static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
+{
if (bus < pbm->pci_first_busno ||
bus > pbm->pci_last_busno)
return 1;
- return 0;
+ return pdev_find(pbm->devhandle, bus, device, func) == NULL;
}
static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
@@ -1063,6 +1173,8 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32
pci_sun4v_get_bus_range(pbm);
pci_sun4v_iommu_init(pbm);
+
+ pdev_htab_populate(pbm);
}
void sun4v_pci_init(int node, char *model_name)
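
pci_sun4v_out_of_range() now answers "does this (devhandle, bus, device, func) exist?" by consulting a small chained hash table populated from the OBP tree. A self-contained user-space model of that table (same 16-bucket hash as the patch, malloc in place of kmalloc):

    #include <stdlib.h>
    #include <stdio.h>

    #define HTAB_SIZE 16
    #define HTAB_MASK (HTAB_SIZE - 1)

    struct pdev_entry {
        struct pdev_entry *next;
        unsigned int devhandle, bus, device, func;
    };

    static struct pdev_entry *htab[HTAB_SIZE];

    static unsigned int hashfn(unsigned int dh, unsigned int b,
                               unsigned int d, unsigned int f)
    {
        return ((dh ^ (dh >> 4)) ^ b ^ d ^ f) & HTAB_MASK;
    }

    static int add(unsigned int dh, unsigned int b, unsigned int d, unsigned int f)
    {
        struct pdev_entry *p = malloc(sizeof(*p));
        struct pdev_entry **slot = &htab[hashfn(dh, b, d, f)];

        if (!p)
            return -1;
        p->devhandle = dh; p->bus = b; p->device = d; p->func = f;
        p->next = *slot;        /* push onto the collision chain */
        *slot = p;
        return 0;
    }

    static struct pdev_entry *find(unsigned int dh, unsigned int b,
                                   unsigned int d, unsigned int f)
    {
        struct pdev_entry *p = htab[hashfn(dh, b, d, f)];

        while (p && !(p->devhandle == dh && p->bus == b &&
                      p->device == d && p->func == f))
            p = p->next;
        return p;
    }

    int main(void)
    {
        add(0x100, 2, 3, 0);
        printf("present: %d\n", find(0x100, 2, 3, 0) != NULL);  /* 1 */
        printf("absent:  %d\n", find(0x100, 2, 4, 0) != NULL);  /* 0 */
        return 0;
    }

A lookup miss now makes the config-space accessors treat the device as out of range instead of probing non-existent functions.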
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 005167f82419..9cf1c88cd774 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -220,7 +220,7 @@ char reboot_command[COMMAND_LINE_SIZE];
static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
-static void __init per_cpu_patch(void)
+void __init per_cpu_patch(void)
{
struct cpuid_patch_entry *p;
unsigned long ver;
@@ -280,7 +280,7 @@ static void __init per_cpu_patch(void)
}
}
-static void __init sun4v_patch(void)
+void __init sun4v_patch(void)
{
struct sun4v_1insn_patch_entry *p1;
struct sun4v_2insn_patch_entry *p2;
@@ -315,6 +315,15 @@ static void __init sun4v_patch(void)
}
}
+#ifdef CONFIG_SMP
+void __init boot_cpu_id_too_large(int cpu)
+{
+ prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
+ cpu, NR_CPUS);
+ prom_halt();
+}
+#endif
+
void __init setup_arch(char **cmdline_p)
{
/* Initialize PROM console and command line. */
@@ -332,16 +341,6 @@ void __init setup_arch(char **cmdline_p)
conswitchp = &prom_con;
#endif
- /* Work out if we are starfire early on */
- check_if_starfire();
-
- /* Now we know enough to patch the get_cpuid sequences
- * used by trap code.
- */
- per_cpu_patch();
-
- sun4v_patch();
-
boot_flags_init(*cmdline_p);
idprom_init();
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 90eaca3ec9a6..f03d52d0b88d 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1264,7 +1264,6 @@ void __init smp_tick_init(void)
boot_cpu_id = hard_smp_processor_id();
current_tick_offset = timer_tick_offset;
- cpu_set(boot_cpu_id, cpu_online_map);
prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}
@@ -1288,6 +1287,40 @@ int setup_profiling_timer(unsigned int multiplier)
return 0;
}
+static void __init smp_tune_scheduling(void)
+{
+ int instance, node;
+ unsigned int def, smallest = ~0U;
+
+ def = ((tlb_type == hypervisor) ?
+ (3 * 1024 * 1024) :
+ (4 * 1024 * 1024));
+
+ instance = 0;
+ while (!cpu_find_by_instance(instance, &node, NULL)) {
+ unsigned int val;
+
+ val = prom_getintdefault(node, "ecache-size", def);
+ if (val < smallest)
+ smallest = val;
+
+ instance++;
+ }
+
+ /* Any value less than 256K is nonsense. */
+ if (smallest < (256U * 1024U))
+ smallest = 256 * 1024;
+
+ max_cache_size = smallest;
+
+ if (smallest < 1U * 1024U * 1024U)
+ printk(KERN_INFO "Using max_cache_size of %uKB\n",
+ smallest / 1024U);
+ else
+ printk(KERN_INFO "Using max_cache_size of %uMB\n",
+ smallest / 1024U / 1024U);
+}
+
/* Constrain the number of cpus to max_cpus. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
@@ -1323,6 +1356,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
smp_store_cpu_info(boot_cpu_id);
+ smp_tune_scheduling();
}
/* Set this up early so that things like the scheduler can init
@@ -1345,18 +1379,6 @@ void __init smp_setup_cpu_possible_map(void)
void __devinit smp_prepare_boot_cpu(void)
{
- int cpu = hard_smp_processor_id();
-
- if (cpu >= NR_CPUS) {
- prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
- prom_halt();
- }
-
- current_thread_info()->cpu = cpu;
- __local_per_cpu_offset = __per_cpu_offset(cpu);
-
- cpu_set(smp_processor_id(), cpu_online_map);
- cpu_set(smp_processor_id(), phys_cpu_present_map);
}
int __devinit __cpu_up(unsigned int cpu)
@@ -1433,4 +1455,7 @@ void __init setup_per_cpu_areas(void)
for (i = 0; i < NR_CPUS; i++, ptr += size)
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+
+ /* Setup %g5 for the boot cpu. */
+ __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
}
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 62d8a99271ea..38e569f786dd 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -297,7 +297,6 @@ EXPORT_SYMBOL(svr4_getcontext);
EXPORT_SYMBOL(svr4_setcontext);
EXPORT_SYMBOL(compat_sys_ioctl);
EXPORT_SYMBOL(sparc32_open);
-EXPORT_SYMBOL(sys_close);
#endif
/* Special internal versions of library functions. */
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 2793a5d82380..563db528e031 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -1797,7 +1797,9 @@ static const char *sun4v_err_type_to_str(u32 type)
};
}
-static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
+extern void __show_regs(struct pt_regs * regs);
+
+static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
{
int cnt;
@@ -1830,6 +1832,8 @@ static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *
pfx,
ent->err_raddr, ent->err_size, ent->err_cpu);
+ __show_regs(regs);
+
if ((cnt = atomic_read(ocnt)) != 0) {
atomic_set(ocnt, 0);
wmb();
@@ -1862,7 +1866,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
put_cpu();
- sun4v_log_error(&local_copy, cpu,
+ sun4v_log_error(regs, &local_copy, cpu,
KERN_ERR "RESUMABLE ERROR",
&sun4v_resum_oflow_cnt);
}
@@ -1910,7 +1914,7 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
}
#endif
- sun4v_log_error(&local_copy, cpu,
+ sun4v_log_error(regs, &local_copy, cpu,
KERN_EMERG "NON-RESUMABLE ERROR",
&sun4v_nonresum_oflow_cnt);
@@ -2200,7 +2204,6 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
void die_if_kernel(char *str, struct pt_regs *regs)
{
static int die_counter;
- extern void __show_regs(struct pt_regs * regs);
extern void smp_report_regs(void);
int count = 0;
diff --git a/arch/sparc64/lib/checksum.S b/arch/sparc64/lib/checksum.S
index ba9cd3ccc2b2..1d230f693dc4 100644
--- a/arch/sparc64/lib/checksum.S
+++ b/arch/sparc64/lib/checksum.S
@@ -165,8 +165,9 @@ csum_partial_end_cruft:
sll %g1, 8, %g1
or %o5, %g1, %o4
-1: add %o2, %o4, %o2
+1: addcc %o2, %o4, %o2
+ addc %g0, %o2, %o2
csum_partial_finish:
retl
- mov %o2, %o0
+ srl %o2, 0, %o0
diff --git a/arch/sparc64/lib/csum_copy.S b/arch/sparc64/lib/csum_copy.S
index 71af48839064..e566c770a0f6 100644
--- a/arch/sparc64/lib/csum_copy.S
+++ b/arch/sparc64/lib/csum_copy.S
@@ -221,11 +221,12 @@ FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */
sll %g1, 8, %g1
or %o5, %g1, %o4
-1: add %o3, %o4, %o3
+1: addcc %o3, %o4, %o3
+ addc %g0, %o3, %o3
70:
retl
- mov %o3, %o0
+ srl %o3, 0, %o0
95: mov 0, GLOBAL_SPARE
brlez,pn %o2, 4f
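
Both checksum fixes are the same fold: the final add must not drop a carry out of bit 31 (hence addcc/addc), and the 32-bit result must be returned zero-extended (hence srl %o2, 0, %o0). In C terms:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t csum_add(uint32_t sum, uint32_t addend)
    {
        uint64_t t = (uint64_t)sum + addend;

        return (uint32_t)(t + (t >> 32));   /* fold the carry back in */
    }

    int main(void)
    {
        /* 0xffffffff + 2 overflows 32 bits; the carry must not be lost */
        printf("0x%08x\n", csum_add(0xffffffffu, 2));   /* prints 0x00000002 */
        return 0;
    }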
diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386
index 7a0e04e34bf9..b65ca115ef77 100644
--- a/arch/um/Makefile-i386
+++ b/arch/um/Makefile-i386
@@ -33,5 +33,9 @@ include $(srctree)/arch/i386/Makefile.cpu
# prevent gcc from keeping the stack 16 byte aligned. Taken from i386.
cflags-y += $(call cc-option,-mpreferred-stack-boundary=2)
+# Prevent sprintf in nfsd from being converted to strcpy and resulting in
+# an unresolved reference.
+cflags-y += -ffreestanding
+
CFLAGS += $(cflags-y)
USER_CFLAGS += $(cflags-y)
diff --git a/arch/um/include/kern_util.h b/arch/um/include/kern_util.h
index efa3d33c0be6..310980b32173 100644
--- a/arch/um/include/kern_util.h
+++ b/arch/um/include/kern_util.h
@@ -120,20 +120,11 @@ extern int is_syscall(unsigned long addr);
extern void free_irq(unsigned int, void *);
extern int cpu(void);
+extern void time_init_kern(void);
+
/* Are we disallowed to sleep? Used to choose between GFP_KERNEL and GFP_ATOMIC. */
extern int __cant_sleep(void);
extern void segv_handler(int sig, union uml_pt_regs *regs);
extern void sigio_handler(int sig, union uml_pt_regs *regs);
#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/kernel/time_kern.c b/arch/um/kernel/time_kern.c
index 528cf623f8b4..86f51d04c98d 100644
--- a/arch/um/kernel/time_kern.c
+++ b/arch/um/kernel/time_kern.c
@@ -84,6 +84,16 @@ void timer_irq(union uml_pt_regs *regs)
}
}
+
+void time_init_kern(void)
+{
+ unsigned long long nsecs;
+
+ nsecs = os_nsecs();
+ set_normalized_timespec(&wall_to_monotonic, -nsecs / BILLION,
+ -nsecs % BILLION);
+}
+
void do_boot_timer_handler(struct sigcontext * sc)
{
struct pt_regs regs;
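
time_init_kern() seeds wall_to_monotonic with the negated boot offset, split into seconds and nanoseconds and then normalized so tv_nsec lands in [0, 1e9). A small model of that normalization (BILLION written out as NSEC_PER_SEC, sample value invented):

    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000LL

    struct ts { long long sec; long long nsec; };

    static struct ts normalized(long long sec, long long nsec)
    {
        struct ts t;

        while (nsec >= NSEC_PER_SEC) { nsec -= NSEC_PER_SEC; sec++; }
        while (nsec < 0)             { nsec += NSEC_PER_SEC; sec--; }
        t.sec = sec;
        t.nsec = nsec;
        return t;
    }

    int main(void)
    {
        long long boot_ns = 1234500000LL;   /* pretend 1.2345 s have elapsed */
        struct ts w = normalized(-(boot_ns / NSEC_PER_SEC),
                                 -(boot_ns % NSEC_PER_SEC));

        printf("wall_to_monotonic = %llds %lldns\n", w.sec, w.nsec);
        /* -> -2s 765500000ns, i.e. exactly -1.2345 s in total */
        return 0;
    }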
diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c
index 3a0ac38e978b..90912aaca7aa 100644
--- a/arch/um/os-Linux/main.c
+++ b/arch/um/os-Linux/main.c
@@ -59,7 +59,7 @@ static __init void do_uml_initcalls(void)
initcall_t *call;
call = &__uml_initcall_start;
- while (call < &__uml_initcall_end){;
+ while (call < &__uml_initcall_end){
(*call)();
call++;
}
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
index 6f7626775acb..280c4fb9b585 100644
--- a/arch/um/os-Linux/time.c
+++ b/arch/um/os-Linux/time.c
@@ -81,20 +81,12 @@ void uml_idle_timer(void)
set_interval(ITIMER_REAL);
}
-extern void ktime_get_ts(struct timespec *ts);
-#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
-
void time_init(void)
{
- struct timespec now;
-
if(signal(SIGVTALRM, boot_timer_handler) == SIG_ERR)
panic("Couldn't set SIGVTALRM handler");
set_interval(ITIMER_VIRTUAL);
-
- do_posix_clock_monotonic_gettime(&now);
- wall_to_monotonic.tv_sec = -now.tv_sec;
- wall_to_monotonic.tv_nsec = -now.tv_nsec;
+ time_init_kern();
}
unsigned long long os_nsecs(void)
diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
index 749dd1bfe60f..710d5fb807e1 100644
--- a/arch/um/sys-i386/syscalls.c
+++ b/arch/um/sys-i386/syscalls.c
@@ -99,11 +99,12 @@ long sys_ipc (uint call, int first, int second,
switch (call) {
case SEMOP:
- return sys_semtimedop(first, (struct sembuf *) ptr, second,
- NULL);
+ return sys_semtimedop(first, (struct sembuf __user *) ptr,
+ second, NULL);
case SEMTIMEDOP:
- return sys_semtimedop(first, (struct sembuf *) ptr, second,
- (const struct timespec *) fifth);
+ return sys_semtimedop(first, (struct sembuf __user *) ptr,
+ second,
+ (const struct timespec __user *) fifth);
case SEMGET:
return sys_semget (first, second, third);
case SEMCTL: {
diff --git a/arch/um/sys-x86_64/signal.c b/arch/um/sys-x86_64/signal.c
index a4c46a8af008..9edf114faf79 100644
--- a/arch/um/sys-x86_64/signal.c
+++ b/arch/um/sys-x86_64/signal.c
@@ -21,7 +21,7 @@
#include "skas.h"
static int copy_sc_from_user_skas(struct pt_regs *regs,
- struct sigcontext *from)
+ struct sigcontext __user *from)
{
int err = 0;
@@ -54,7 +54,8 @@ static int copy_sc_from_user_skas(struct pt_regs *regs,
return(err);
}
-int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
+int copy_sc_to_user_skas(struct sigcontext __user *to,
+ struct _fpstate __user *to_fp,
struct pt_regs *regs, unsigned long mask,
unsigned long sp)
{
@@ -106,10 +107,11 @@ int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
#endif
#ifdef CONFIG_MODE_TT
-int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from,
+int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext __user *from,
int fpsize)
{
- struct _fpstate *to_fp, *from_fp;
+ struct _fpstate *to_fp;
+ struct _fpstate __user *from_fp;
unsigned long sigs;
int err;
@@ -124,13 +126,14 @@ int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from,
return(err);
}
-int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp,
+int copy_sc_to_user_tt(struct sigcontext __user *to, struct _fpstate __user *fp,
struct sigcontext *from, int fpsize, unsigned long sp)
{
- struct _fpstate *to_fp, *from_fp;
+ struct _fpstate __user *to_fp;
+ struct _fpstate *from_fp;
int err;
- to_fp = (fp ? fp : (struct _fpstate *) (to + 1));
+ to_fp = (fp ? fp : (struct _fpstate __user *) (to + 1));
from_fp = from->fpstate;
err = copy_to_user(to, from, sizeof(*to));
/* The SP in the sigcontext is the updated one for the signal
@@ -158,7 +161,8 @@ static int copy_sc_from_user(struct pt_regs *to, void __user *from)
return(ret);
}
-static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp,
+static int copy_sc_to_user(struct sigcontext __user *to,
+ struct _fpstate __user *fp,
struct pt_regs *from, unsigned long mask,
unsigned long sp)
{
@@ -169,7 +173,7 @@ static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp,
struct rt_sigframe
{
- char *pretcode;
+ char __user *pretcode;
struct ucontext uc;
struct siginfo info;
};
@@ -188,7 +192,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
frame = (struct rt_sigframe __user *)
round_down(stack_top - sizeof(struct rt_sigframe), 16) - 8;
- frame = (struct rt_sigframe *) ((unsigned long) frame - 128);
+ frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128);
if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate)))
goto out;
diff --git a/arch/um/sys-x86_64/syscalls.c b/arch/um/sys-x86_64/syscalls.c
index 6acee5c4ada6..6fce9f45dfdc 100644
--- a/arch/um/sys-x86_64/syscalls.c
+++ b/arch/um/sys-x86_64/syscalls.c
@@ -45,7 +45,7 @@ static long arch_prctl_tt(int code, unsigned long addr)
case ARCH_GET_GS:
ret = arch_prctl(code, (unsigned long) &tmp);
if(!ret)
- ret = put_user(tmp, &addr);
+ ret = put_user(tmp, (long __user *)addr);
break;
default:
ret = -EINVAL;
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index e776139afb20..926c4743d13b 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -339,7 +339,7 @@ int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top,
struct mm_struct *mm = current->mm;
int i, ret;
- stack_base = IA32_STACK_TOP - MAX_ARG_PAGES * PAGE_SIZE;
+ stack_base = stack_top - MAX_ARG_PAGES * PAGE_SIZE;
mm->arg_start = bprm->p + stack_base;
bprm->p += stack_base;
@@ -357,7 +357,7 @@ int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top,
{
mpnt->vm_mm = mm;
mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
- mpnt->vm_end = IA32_STACK_TOP;
+ mpnt->vm_end = stack_top;
if (executable_stack == EXSTACK_ENABLE_X)
mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
else if (executable_stack == EXSTACK_DISABLE_X)
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 222b5b46d2b2..1ef6028f721e 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -149,7 +149,7 @@ unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsi
addr = start;
if (addr > ei->addr + ei->size)
continue;
- while (bad_addr(&addr, size) && addr+size < ei->addr + ei->size)
+ while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
;
last = addr + size;
if (last > ei->addr + ei->size)
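
The comparison change is an off-by-one at the top of an e820 entry: a candidate [addr, addr+size) that ends exactly at the entry's end still fits and must stay in the search loop, so the bound becomes <=. Illustration (addresses invented):

    #include <stdio.h>

    static int fits(unsigned long addr, unsigned long size,
                    unsigned long start, unsigned long len)
    {
        return addr >= start && addr + size <= start + len;
    }

    int main(void)
    {
        /* a 0x1000-byte area ending exactly at the entry end is still valid */
        printf("%d\n", fits(0x9f000, 0x1000, 0x90000, 0x10000));    /* 1 */
        return 0;
    }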
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index c946e4fe67a7..586b34c00c48 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -281,12 +281,7 @@ tracesys:
ja 1f
movq %r10,%rcx /* fixup for C */
call *sys_call_table(,%rax,8)
- movq %rax,RAX-ARGOFFSET(%rsp)
-1: SAVE_REST
- movq %rsp,%rdi
- call syscall_trace_leave
- RESTORE_TOP_OF_STACK %rbx
- RESTORE_REST
+1: movq %rax,RAX-ARGOFFSET(%rsp)
/* Use IRET because user could have changed frame */
jmp int_ret_from_sys_call
CFI_ENDPROC
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 0de3ea938830..9cc7031b7151 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -271,6 +271,18 @@ __setup("enable_8254_timer", setup_enable_8254_timer);
#include <linux/pci_ids.h>
#include <linux/pci.h>
+
+#ifdef CONFIG_ACPI
+
+static int nvidia_hpet_detected __initdata;
+
+static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+{
+ nvidia_hpet_detected = 1;
+ return 0;
+}
+#endif
+
/* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
off. Check for an Nvidia or VIA PCI bridge and turn it off.
Use pci direct infrastructure because this runs before the PCI subsystem.
@@ -317,11 +329,19 @@ void __init check_ioapic(void)
return;
case PCI_VENDOR_ID_NVIDIA:
#ifdef CONFIG_ACPI
- /* All timer overrides on Nvidia
- seem to be wrong. Skip them. */
- acpi_skip_timer_override = 1;
- printk(KERN_INFO
- "Nvidia board detected. Ignoring ACPI timer override.\n");
+ /*
+ * All timer overrides on Nvidia are
+ * wrong unless HPET is enabled.
+ */
+ nvidia_hpet_detected = 0;
+ acpi_table_parse(ACPI_HPET,
+ nvidia_hpet_check);
+ if (nvidia_hpet_detected == 0) {
+ acpi_skip_timer_override = 1;
+ printk(KERN_INFO "Nvidia board "
+ "detected. Ignoring ACPI "
+ "timer override.\n");
+ }
#endif
/* RED-PEN skip them on mptables too? */
return;
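
The Nvidia quirk now only skips the ACPI timer override when no HPET table is found; acpi_table_parse() walks the tables and the callback just latches a flag. A sketch of that callback-and-flag shape with a stub table walker (the stub and its fake address are not the ACPI API):

    #include <stdio.h>

    static int hpet_found;

    static int hpet_check(unsigned long phys, unsigned long size)
    {
        (void)phys; (void)size;
        hpet_found = 1;
        return 0;
    }

    /* stand-in for acpi_table_parse(ACPI_HPET, handler) */
    static void parse_tables_stub(int (*handler)(unsigned long, unsigned long))
    {
        handler(0xfed00000ul, 0x38ul);  /* pretend one HPET table exists */
    }

    int main(void)
    {
        hpet_found = 0;
        parse_tables_stub(hpet_check);
        printf(hpet_found ? "HPET present: keep the ACPI timer override\n"
                          : "no HPET: skip the Nvidia timer override\n");
        return 0;
    }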
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index af035ede70cd..a9275c9557cf 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -54,6 +54,10 @@ dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
else
#endif
node = numa_node_id();
+
+ if (node < first_node(node_online_map))
+ node = first_node(node_online_map);
+
page = alloc_pages_node(node, gfp, order);
return page ? page_address(page) : NULL;
}
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index 2480d3f08a47..82a7c9bfdfa0 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -631,10 +631,8 @@ static int __init pci_iommu_init(void)
printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
if (end_pfn > MAX_DMA32_PFN) {
printk(KERN_ERR "WARNING more than 4GB of memory "
- "but IOMMU not compiled in.\n"
- KERN_ERR "WARNING 32bit PCI may malfunction.\n"
- KERN_ERR "You might want to enable "
- "CONFIG_GART_IOMMU\n");
+ "but IOMMU not available.\n"
+ KERN_ERR "WARNING 32bit PCI may malfunction.\n");
}
return -1;
}
diff --git a/arch/x86_64/kernel/pmtimer.c b/arch/x86_64/kernel/pmtimer.c
index b0444a415bd6..bf421ed26808 100644
--- a/arch/x86_64/kernel/pmtimer.c
+++ b/arch/x86_64/kernel/pmtimer.c
@@ -68,7 +68,7 @@ int pmtimer_mark_offset(void)
offset_delay = delta % (USEC_PER_SEC / HZ);
rdtscll(tsc);
- vxtime.last_tsc = tsc - offset_delay * cpu_khz;
+ vxtime.last_tsc = tsc - offset_delay * (u64)cpu_khz / 1000;
/* don't calculate delay for first run,
or if we've got less then a tick */
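
The pmtimer fix is a unit conversion: offset_delay is in microseconds while cpu_khz is cycles per millisecond, so the TSC correction is delay_us * cpu_khz / 1000; the old expression was a factor of 1000 too large. For example:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t usecs_to_tsc(uint64_t delay_us, uint64_t cpu_khz)
    {
        return delay_us * cpu_khz / 1000;   /* 64-bit math avoids overflow */
    }

    int main(void)
    {
        /* 250 us on a 2 GHz part (cpu_khz = 2000000) -> 500000 cycles */
        printf("%llu\n", (unsigned long long)usecs_to_tsc(250, 2000000));
        return 0;
    }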
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index f0870bef24d1..655b9192eeb3 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -1051,7 +1051,7 @@ static void srat_detect_node(void)
for now. */
node = apicid_to_node[hard_smp_processor_id()];
if (node == NUMA_NO_NODE)
- node = 0;
+ node = first_node(node_online_map);
numa_set_node(cpu, node);
if (acpi_numa > 0)
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index e1513532df29..474df22c6ed2 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -399,8 +399,10 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
/* First clean up the node list */
for (i = 0; i < MAX_NUMNODES; i++) {
cutoff_node(i, start, end);
- if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE)
+ if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
unparse_node(i);
+ node_set_offline(i);
+ }
}
if (acpi_numa <= 0)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index e25a5d79ab27..a7caf35ca0c2 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1648,17 +1648,17 @@ static void as_exit_queue(elevator_t *e)
* initialize elevator private data (as_data), and alloc a arq for
* each request on the free lists
*/
-static int as_init_queue(request_queue_t *q, elevator_t *e)
+static void *as_init_queue(request_queue_t *q, elevator_t *e)
{
struct as_data *ad;
int i;
if (!arq_pool)
- return -ENOMEM;
+ return NULL;
ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
if (!ad)
- return -ENOMEM;
+ return NULL;
memset(ad, 0, sizeof(*ad));
ad->q = q; /* Identify what queue the data belongs to */
@@ -1667,7 +1667,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
GFP_KERNEL, q->node);
if (!ad->hash) {
kfree(ad);
- return -ENOMEM;
+ return NULL;
}
ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
@@ -1675,7 +1675,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
if (!ad->arq_pool) {
kfree(ad->hash);
kfree(ad);
- return -ENOMEM;
+ return NULL;
}
/* anticipatory scheduling helpers */
@@ -1696,14 +1696,13 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
ad->antic_expire = default_antic_expire;
ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
- e->elevator_data = ad;
ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
if (ad->write_batch_count < 2)
ad->write_batch_count = 2;
- return 0;
+ return ad;
}
/*
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 2540dfaa3e38..052b17487625 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -33,7 +33,7 @@ static int cfq_slice_idle = HZ / 70;
#define CFQ_KEY_ASYNC (0)
-static DEFINE_RWLOCK(cfq_exit_lock);
+static DEFINE_SPINLOCK(cfq_exit_lock);
/*
* for the hash of cfqq inside the cfqd
@@ -133,6 +133,7 @@ struct cfq_data {
mempool_t *crq_pool;
int rq_in_driver;
+ int hw_tag;
/*
* schedule slice state info
@@ -500,10 +501,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
/*
* if queue was preempted, just add to front to be fair. busy_rr
- * isn't sorted.
+ * isn't sorted, but insert at the back for fairness.
*/
if (preempted || list == &cfqd->busy_rr) {
- list_add(&cfqq->cfq_list, list);
+ if (preempted)
+ list = list->prev;
+
+ list_add_tail(&cfqq->cfq_list, list);
return;
}
@@ -664,6 +668,15 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq)
struct cfq_data *cfqd = q->elevator->elevator_data;
cfqd->rq_in_driver++;
+
+ /*
+ * If the depth is larger than 1, it really could be queueing. But let's
+ * make the mark a little higher - idling could still be good for
+ * low queueing, and a low queueing number could also just indicate
+ * SCSI mid-layer-like behaviour where limit+1 is often seen.
+ */
+ if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
+ cfqd->hw_tag = 1;
}
static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
@@ -879,6 +892,13 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
cfqq = list_entry_cfqq(cfqd->cur_rr.next);
/*
+ * If no new queues are available, check if the busy list has some
+ * before falling back to idle io.
+ */
+ if (!cfqq && !list_empty(&cfqd->busy_rr))
+ cfqq = list_entry_cfqq(cfqd->busy_rr.next);
+
+ /*
* if we have idle queues and no rt or be queues had pending
* requests, either allow immediate service if the grace period
* has passed or arm the idle grace timer
@@ -1284,7 +1304,7 @@ static void cfq_exit_io_context(struct io_context *ioc)
/*
* put the reference this task is holding to the various queues
*/
- read_lock_irqsave(&cfq_exit_lock, flags);
+ spin_lock_irqsave(&cfq_exit_lock, flags);
n = rb_first(&ioc->cic_root);
while (n != NULL) {
@@ -1294,7 +1314,7 @@ static void cfq_exit_io_context(struct io_context *ioc)
n = rb_next(n);
}
- read_unlock_irqrestore(&cfq_exit_lock, flags);
+ spin_unlock_irqrestore(&cfq_exit_lock, flags);
}
static struct cfq_io_context *
@@ -1303,17 +1323,12 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
if (cic) {
- RB_CLEAR(&cic->rb_node);
- cic->key = NULL;
- cic->cfqq[ASYNC] = NULL;
- cic->cfqq[SYNC] = NULL;
+ memset(cic, 0, sizeof(*cic));
+ RB_CLEAR_COLOR(&cic->rb_node);
cic->last_end_request = jiffies;
- cic->ttime_total = 0;
- cic->ttime_samples = 0;
- cic->ttime_mean = 0;
+ INIT_LIST_HEAD(&cic->queue_list);
cic->dtor = cfq_free_io_context;
cic->exit = cfq_exit_io_context;
- INIT_LIST_HEAD(&cic->queue_list);
atomic_inc(&ioc_count);
}
@@ -1400,17 +1415,17 @@ static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
struct cfq_io_context *cic;
struct rb_node *n;
- write_lock(&cfq_exit_lock);
+ spin_lock(&cfq_exit_lock);
n = rb_first(&ioc->cic_root);
while (n != NULL) {
cic = rb_entry(n, struct cfq_io_context, rb_node);
-
+
changed_ioprio(cic);
n = rb_next(n);
}
- write_unlock(&cfq_exit_lock);
+ spin_unlock(&cfq_exit_lock);
return 0;
}
@@ -1458,7 +1473,8 @@ retry:
* set ->slice_left to allow preemption for a new process
*/
cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
- cfq_mark_cfqq_idle_window(cfqq);
+ if (!cfqd->hw_tag)
+ cfq_mark_cfqq_idle_window(cfqq);
cfq_mark_cfqq_prio_changed(cfqq);
cfq_init_prio_data(cfqq);
}
@@ -1475,9 +1491,10 @@ out:
static void
cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
{
- read_lock(&cfq_exit_lock);
+ spin_lock(&cfq_exit_lock);
rb_erase(&cic->rb_node, &ioc->cic_root);
- read_unlock(&cfq_exit_lock);
+ list_del_init(&cic->queue_list);
+ spin_unlock(&cfq_exit_lock);
kmem_cache_free(cfq_ioc_pool, cic);
atomic_dec(&ioc_count);
}
@@ -1545,11 +1562,11 @@ restart:
BUG();
}
- read_lock(&cfq_exit_lock);
+ spin_lock(&cfq_exit_lock);
rb_link_node(&cic->rb_node, parent, p);
rb_insert_color(&cic->rb_node, &ioc->cic_root);
list_add(&cic->queue_list, &cfqd->cic_list);
- read_unlock(&cfq_exit_lock);
+ spin_unlock(&cfq_exit_lock);
}
/*
@@ -1648,7 +1665,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
{
int enable_idle = cfq_cfqq_idle_window(cfqq);
- if (!cic->ioc->task || !cfqd->cfq_slice_idle)
+ if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
enable_idle = 0;
else if (sample_valid(cic->ttime_samples)) {
if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -1739,14 +1756,24 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
+ cic = crq->io_context;
+
/*
* we never wait for an async request and we don't allow preemption
* of an async request. so just return early
*/
- if (!cfq_crq_is_sync(crq))
+ if (!cfq_crq_is_sync(crq)) {
+ /*
+ * sync process issued an async request, if it's waiting
+ * then expire it and kick rq handling.
+ */
+ if (cic == cfqd->active_cic &&
+ del_timer(&cfqd->idle_slice_timer)) {
+ cfq_slice_expired(cfqd, 0);
+ cfq_start_queueing(cfqd, cfqq);
+ }
return;
-
- cic = crq->io_context;
+ }
cfq_update_io_thinktime(cfqd, cic);
cfq_update_io_seektime(cfqd, cic, crq);
@@ -2164,10 +2191,9 @@ static void cfq_idle_class_timer(unsigned long data)
* race with a non-idle queue, reset timer
*/
end = cfqd->last_end_request + CFQ_IDLE_GRACE;
- if (!time_after_eq(jiffies, end)) {
- cfqd->idle_class_timer.expires = end;
- add_timer(&cfqd->idle_class_timer);
- } else
+ if (!time_after_eq(jiffies, end))
+ mod_timer(&cfqd->idle_class_timer, end);
+ else
cfq_schedule_dispatch(cfqd);
spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
@@ -2187,7 +2213,7 @@ static void cfq_exit_queue(elevator_t *e)
cfq_shutdown_timer_wq(cfqd);
- write_lock(&cfq_exit_lock);
+ spin_lock(&cfq_exit_lock);
spin_lock_irq(q->queue_lock);
if (cfqd->active_queue)
@@ -2210,7 +2236,7 @@ static void cfq_exit_queue(elevator_t *e)
}
spin_unlock_irq(q->queue_lock);
- write_unlock(&cfq_exit_lock);
+ spin_unlock(&cfq_exit_lock);
cfq_shutdown_timer_wq(cfqd);
@@ -2220,14 +2246,14 @@ static void cfq_exit_queue(elevator_t *e)
kfree(cfqd);
}
-static int cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
{
struct cfq_data *cfqd;
int i;
cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
if (!cfqd)
- return -ENOMEM;
+ return NULL;
memset(cfqd, 0, sizeof(*cfqd));
@@ -2257,8 +2283,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
- e->elevator_data = cfqd;
-
cfqd->queue = q;
cfqd->max_queued = q->nr_requests / 4;
@@ -2285,14 +2309,14 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
- return 0;
+ return cfqd;
out_crqpool:
kfree(cfqd->cfq_hash);
out_cfqhash:
kfree(cfqd->crq_hash);
out_crqhash:
kfree(cfqd);
- return -ENOMEM;
+ return NULL;
}
static void cfq_slab_kill(void)
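
The new hw_tag flag is a heuristic: once the driver has been observed holding more than four requests at once, the device is assumed to do its own command queueing and cfq stops arming idle windows for it. A compact model (struct fields and threshold taken from the hunk, the rest invented):

    #include <stdio.h>

    struct cfqd_like {
        int rq_in_driver;
        int hw_tag;
    };

    static void activate_request(struct cfqd_like *cfqd)
    {
        cfqd->rq_in_driver++;
        /* depth > 4: almost certainly real tagged queueing, not just the
         * usual limit+1 behaviour seen at the SCSI mid-layer */
        if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
            cfqd->hw_tag = 1;
    }

    int main(void)
    {
        struct cfqd_like d = { 0, 0 };
        int i;

        for (i = 0; i < 6; i++)
            activate_request(&d);
        printf("hw_tag = %d\n", d.hw_tag);  /* 1: idle windows will be skipped */
        return 0;
    }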
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 399fa1e60e1f..3bd0415a9828 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -613,24 +613,24 @@ static void deadline_exit_queue(elevator_t *e)
* initialize elevator private data (deadline_data), and alloc a drq for
* each request on the free lists
*/
-static int deadline_init_queue(request_queue_t *q, elevator_t *e)
+static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
{
struct deadline_data *dd;
int i;
if (!drq_pool)
- return -ENOMEM;
+ return NULL;
dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
if (!dd)
- return -ENOMEM;
+ return NULL;
memset(dd, 0, sizeof(*dd));
dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
GFP_KERNEL, q->node);
if (!dd->hash) {
kfree(dd);
- return -ENOMEM;
+ return NULL;
}
dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
@@ -638,7 +638,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
if (!dd->drq_pool) {
kfree(dd->hash);
kfree(dd);
- return -ENOMEM;
+ return NULL;
}
for (i = 0; i < DL_HASH_ENTRIES; i++)
@@ -653,8 +653,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
dd->writes_starved = writes_starved;
dd->front_merges = 1;
dd->fifo_batch = fifo_batch;
- e->elevator_data = dd;
- return 0;
+ return dd;
}
static void deadline_put_request(request_queue_t *q, struct request *rq)
diff --git a/block/elevator.c b/block/elevator.c
index 8768a367fdde..a0afdd317cef 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -121,16 +121,16 @@ static struct elevator_type *elevator_get(const char *name)
return e;
}
-static int elevator_attach(request_queue_t *q, struct elevator_queue *eq)
+static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
{
- int ret = 0;
+ return eq->ops->elevator_init_fn(q, eq);
+}
+static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
+ void *data)
+{
q->elevator = eq;
-
- if (eq->ops->elevator_init_fn)
- ret = eq->ops->elevator_init_fn(q, eq);
-
- return ret;
+ eq->elevator_data = data;
}
static char chosen_elevator[16];
@@ -181,6 +181,7 @@ int elevator_init(request_queue_t *q, char *name)
struct elevator_type *e = NULL;
struct elevator_queue *eq;
int ret = 0;
+ void *data;
INIT_LIST_HEAD(&q->queue_head);
q->last_merge = NULL;
@@ -202,10 +203,13 @@ int elevator_init(request_queue_t *q, char *name)
if (!eq)
return -ENOMEM;
- ret = elevator_attach(q, eq);
- if (ret)
+ data = elevator_init_queue(q, eq);
+ if (!data) {
kobject_put(&eq->kobj);
+ return -ENOMEM;
+ }
+ elevator_attach(q, eq, data);
return ret;
}
@@ -722,13 +726,16 @@ int elv_register_queue(struct request_queue *q)
return error;
}
+static void __elv_unregister_queue(elevator_t *e)
+{
+ kobject_uevent(&e->kobj, KOBJ_REMOVE);
+ kobject_del(&e->kobj);
+}
+
void elv_unregister_queue(struct request_queue *q)
{
- if (q) {
- elevator_t *e = q->elevator;
- kobject_uevent(&e->kobj, KOBJ_REMOVE);
- kobject_del(&e->kobj);
- }
+ if (q)
+ __elv_unregister_queue(q->elevator);
}
int elv_register(struct elevator_type *e)
@@ -780,6 +787,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
elevator_t *old_elevator, *e;
+ void *data;
/*
* Allocate new elevator
@@ -788,6 +796,12 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
if (!e)
return 0;
+ data = elevator_init_queue(q, e);
+ if (!data) {
+ kobject_put(&e->kobj);
+ return 0;
+ }
+
/*
* Turn on BYPASS and drain all requests w/ elevator private data
*/
@@ -806,19 +820,19 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
elv_drain_elevator(q);
}
- spin_unlock_irq(q->queue_lock);
-
/*
- * unregister old elevator data
+ * Remember old elevator.
*/
- elv_unregister_queue(q);
old_elevator = q->elevator;
/*
* attach and start new elevator
*/
- if (elevator_attach(q, e))
- goto fail;
+ elevator_attach(q, e, data);
+
+ spin_unlock_irq(q->queue_lock);
+
+ __elv_unregister_queue(old_elevator);
if (elv_register_queue(q))
goto fail_register;
@@ -837,7 +851,6 @@ fail_register:
*/
elevator_exit(e);
e = NULL;
-fail:
q->elevator = old_elevator;
elv_register_queue(q);
clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index f370e4a7fe6d..56a7c620574f 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -65,16 +65,15 @@ noop_latter_request(request_queue_t *q, struct request *rq)
return list_entry(rq->queuelist.next, struct request, queuelist);
}
-static int noop_init_queue(request_queue_t *q, elevator_t *e)
+static void *noop_init_queue(request_queue_t *q, elevator_t *e)
{
struct noop_data *nd;
nd = kmalloc(sizeof(*nd), GFP_KERNEL);
if (!nd)
- return -ENOMEM;
+ return NULL;
INIT_LIST_HEAD(&nd->queue);
- e->elevator_data = nd;
- return 0;
+ return nd;
}
static void noop_exit_queue(elevator_t *e)
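
Across all four schedulers the elevator_init_fn contract changes: it now returns the scheduler's private data (NULL on failure) and the core attaches it through elevator_attach(), so a half-built elevator is never published via e->elevator_data. A minimal model of the new flow (plain malloc, invented names):

    #include <stdlib.h>
    #include <stdio.h>

    struct elevator { void *elevator_data; };
    struct sched_data { int queued; };

    static void *sched_init_queue(void)
    {
        struct sched_data *d = calloc(1, sizeof(*d));

        return d;               /* NULL here means -ENOMEM to the core */
    }

    static int elevator_init(struct elevator *eq)
    {
        void *data = sched_init_queue();

        if (!data)
            return -1;
        eq->elevator_data = data;   /* attach only once fully built */
        return 0;
    }

    int main(void)
    {
        struct elevator e = { NULL };

        printf("init: %d, data attached: %d\n",
               elevator_init(&e), e.elevator_data != NULL);
        return 0;
    }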
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 07bc6dfe662b..8920e8c6e246 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -812,6 +812,9 @@ static int irqrouter_resume(struct sys_device *dev)
ACPI_FUNCTION_TRACE("irqrouter_resume");
+ /* Make sure SCI is enabled again (Apple firmware bug?) */
+ acpi_set_register(ACPI_BITREG_SCI_ENABLE, 1, ACPI_MTX_DO_NOT_LOCK);
+
acpi_in_resume = 1;
list_for_each(node, &acpi_link.entries) {
link = list_entry(node, struct acpi_pci_link, node);
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index abbdb37a7f5f..f36db22ce1ae 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -577,6 +577,8 @@ acpi_processor_register_performance(struct acpi_processor_performance
return_VALUE(-EBUSY);
}
+ WARN_ON(!performance);
+
pr->performance = performance;
if (acpi_processor_get_performance_info(pr)) {
@@ -609,7 +611,8 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
return_VOID;
}
- kfree(pr->performance->states);
+ if (pr->performance)
+ kfree(pr->performance->states);
pr->performance = NULL;
acpi_cpufreq_remove_file(pr);
diff --git a/drivers/base/power/suspend.c b/drivers/base/power/suspend.c
index 662209d3f42d..2a769cc6f5f9 100644
--- a/drivers/base/power/suspend.c
+++ b/drivers/base/power/suspend.c
@@ -8,7 +8,6 @@
*
*/
-#include <linux/vt_kern.h>
#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/pm.h>
@@ -66,6 +65,7 @@ int suspend_device(struct device * dev, pm_message_t state)
return error;
}
+
/**
* device_suspend - Save state and stop all devices in system.
* @state: Power state to put each device in.
@@ -85,9 +85,6 @@ int device_suspend(pm_message_t state)
{
int error = 0;
- if (!is_console_suspend_safe())
- return -EINVAL;
-
down(&dpm_sem);
down(&dpm_list_sem);
while (!list_empty(&dpm_active) && error == 0) {
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index a59876a0bfa1..3170eaa25087 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1009,9 +1009,9 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
if (fp->f_mode & FMODE_WRITE) {
ret = -EROFS;
if (cdrom_open_write(cdi))
- goto err;
+ goto err_release;
if (!CDROM_CAN(CDC_RAM))
- goto err;
+ goto err_release;
ret = 0;
cdi->media_written = 0;
}
@@ -1026,6 +1026,8 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
not be mounting, but opening with O_NONBLOCK */
check_disk_change(ip->i_bdev);
return 0;
+err_release:
+ cdi->ops->release(cdi);
err:
cdi->use_count--;
return ret;
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index f5b01c6d498e..fb919bfb2824 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -41,9 +41,9 @@ obj-$(CONFIG_N_HDLC) += n_hdlc.o
obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
obj-$(CONFIG_SX) += sx.o generic_serial.o
obj-$(CONFIG_RIO) += rio/ generic_serial.o
-obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o
obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
+obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
obj-$(CONFIG_MMTIMER) += mmtimer.o
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index b36eef0e9d19..02a7dd7a8a55 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1184,20 +1184,20 @@ static void port_outl(struct si_sm_io *io, unsigned int offset,
static void port_cleanup(struct smi_info *info)
{
unsigned int addr = info->io.addr_data;
- int mapsize;
+ int idx;
if (addr) {
- mapsize = ((info->io_size * info->io.regspacing)
- - (info->io.regspacing - info->io.regsize));
-
- release_region (addr, mapsize);
+ for (idx = 0; idx < info->io_size; idx++) {
+ release_region(addr + idx * info->io.regspacing,
+ info->io.regsize);
+ }
}
}
static int port_setup(struct smi_info *info)
{
unsigned int addr = info->io.addr_data;
- int mapsize;
+ int idx;
if (!addr)
return -ENODEV;
@@ -1225,16 +1225,22 @@ static int port_setup(struct smi_info *info)
return -EINVAL;
}
- /* Calculate the total amount of memory to claim. This is an
- * unusual looking calculation, but it avoids claiming any
- * more memory than it has to. It will claim everything
- * between the first address to the end of the last full
- * register. */
- mapsize = ((info->io_size * info->io.regspacing)
- - (info->io.regspacing - info->io.regsize));
-
- if (request_region(addr, mapsize, DEVICE_NAME) == NULL)
- return -EIO;
+ /* Some BIOSes reserve disjoint I/O regions in their ACPI
+ * tables. This causes problems when trying to register the
+ * entire I/O region. Therefore we must register each I/O
+ * port separately.
+ */
+ for (idx = 0; idx < info->io_size; idx++) {
+ if (request_region(addr + idx * info->io.regspacing,
+ info->io.regsize, DEVICE_NAME) == NULL) {
+ /* Undo allocations */
+ while (idx--) {
+ release_region(addr + idx * info->io.regspacing,
+ info->io.regsize);
+ }
+ return -EIO;
+ }
+ }
return 0;
}
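
port_setup() now claims each register's I/O port individually and unwinds the ports already claimed if one request fails, since some BIOSes reserve disjoint regions. The rollback pattern, with a stub claim()/unclaim() standing in for request_region()/release_region():

    #include <stdio.h>

    /* stand-ins: pretend port 0x6f4 is already reserved by firmware */
    static int claim(unsigned int port)    { return port != 0x6f4; }
    static void unclaim(unsigned int port) { (void)port; }

    static int setup_ports(unsigned int base, int count, int spacing)
    {
        int idx;

        for (idx = 0; idx < count; idx++) {
            if (!claim(base + idx * spacing)) {
                while (idx--)               /* undo what we already claimed */
                    unclaim(base + idx * spacing);
                return -1;
            }
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", setup_ports(0x6f0, 3, 4));   /* -1: 0x6f4 busy, 0x6f0 released again */
        return 0;
    }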
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index ede365d05387..b9371d5bf790 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -1384,8 +1384,10 @@ do_it_again:
* longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
* we won't get any more characters.
*/
- if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE)
+ if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
+ n_tty_set_room(tty);
check_unthrottle(tty);
+ }
if (b - buf >= minimum)
break;
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 128b2632512d..eab5394da666 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -149,7 +149,7 @@ struct cm4000_dev {
#define ZERO_DEV(dev) \
memset(&dev->atr_csum,0, \
sizeof(struct cm4000_dev) - \
- /*link*/ sizeof(struct pcmcia_device) - \
+ /*link*/ sizeof(struct pcmcia_device *) - \
/*node*/ sizeof(dev_node_t) - \
/*atr*/ MAX_ATR*sizeof(char) - \
/*rbuf*/ 512*sizeof(char) - \
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
index e45f0d3d12de..a611972024e6 100644
--- a/drivers/char/tpm/tpm_bios.c
+++ b/drivers/char/tpm/tpm_bios.c
@@ -105,6 +105,12 @@ static const char* tcpa_event_type_strings[] = {
"Non-Host Info"
};
+struct tcpa_pc_event {
+ u32 event_id;
+ u32 event_size;
+ u8 event_data[0];
+};
+
enum tcpa_pc_event_ids {
SMBIOS = 1,
BIS_CERT,
@@ -114,14 +120,15 @@ enum tcpa_pc_event_ids {
NVRAM,
OPTION_ROM_EXEC,
OPTION_ROM_CONFIG,
- OPTION_ROM_MICROCODE,
+ OPTION_ROM_MICROCODE = 10,
S_CRTM_VERSION,
S_CRTM_CONTENTS,
POST_CONTENTS,
+ HOST_TABLE_OF_DEVICES,
};
static const char* tcpa_pc_event_id_strings[] = {
- ""
+ "",
"SMBIOS",
"BIS Certificate",
"POST BIOS ",
@@ -130,11 +137,12 @@ static const char* tcpa_pc_event_id_strings[] = {
"NVRAM",
"Option ROM",
"Option ROM config",
- "Option ROM microcode",
+ "",
+ "Option ROM microcode ",
"S-CRTM Version",
- "S-CRTM Contents",
- "S-CRTM POST Contents",
- "POST Contents",
+ "S-CRTM Contents ",
+ "POST Contents ",
+ "Table of Devices",
};
/* returns pointer to start of pos. entry of tcg log */
@@ -206,7 +214,7 @@ static int get_event_name(char *dest, struct tcpa_event *event,
const char *name = "";
char data[40] = "";
int i, n_len = 0, d_len = 0;
- u32 event_id;
+ struct tcpa_pc_event *pc_event;
switch(event->event_type) {
case PREBOOT:
@@ -235,31 +243,32 @@ static int get_event_name(char *dest, struct tcpa_event *event,
}
break;
case EVENT_TAG:
- event_id = be32_to_cpu(*((u32 *)event_entry));
+ pc_event = (struct tcpa_pc_event *)event_entry;
/* ToDo Row data -> Base64 */
- switch (event_id) {
+ switch (pc_event->event_id) {
case SMBIOS:
case BIS_CERT:
case CMOS:
case NVRAM:
case OPTION_ROM_EXEC:
case OPTION_ROM_CONFIG:
- case OPTION_ROM_MICROCODE:
case S_CRTM_VERSION:
- case S_CRTM_CONTENTS:
- case POST_CONTENTS:
- name = tcpa_pc_event_id_strings[event_id];
+ name = tcpa_pc_event_id_strings[pc_event->event_id];
n_len = strlen(name);
break;
+ /* hash data */
case POST_BIOS_ROM:
case ESCD:
- name = tcpa_pc_event_id_strings[event_id];
+ case OPTION_ROM_MICROCODE:
+ case S_CRTM_CONTENTS:
+ case POST_CONTENTS:
+ name = tcpa_pc_event_id_strings[pc_event->event_id];
n_len = strlen(name);
for (i = 0; i < 20; i++)
- d_len += sprintf(data, "%02x",
- event_entry[8 + i]);
+ d_len += sprintf(&data[2*i], "%02x",
+ pc_event->event_data[i]);
break;
default:
break;
@@ -275,53 +284,13 @@ static int get_event_name(char *dest, struct tcpa_event *event,
static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
{
+ struct tcpa_event *event = v;
+ char *data = v;
+ int i;
- char *eventname;
- char data[4];
- u32 help;
- int i, len;
- struct tcpa_event *event = (struct tcpa_event *) v;
- unsigned char *event_entry =
- (unsigned char *) (v + sizeof(struct tcpa_event));
-
- eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
- if (!eventname) {
- printk(KERN_ERR "%s: ERROR - No Memory for event name\n ",
- __func__);
- return -ENOMEM;
- }
-
- /* 1st: PCR used is in little-endian format (4 bytes) */
- help = le32_to_cpu(event->pcr_index);
- memcpy(data, &help, 4);
- for (i = 0; i < 4; i++)
- seq_putc(m, data[i]);
-
- /* 2nd: SHA1 (20 bytes) */
- for (i = 0; i < 20; i++)
- seq_putc(m, event->pcr_value[i]);
-
- /* 3rd: event type identifier (4 bytes) */
- help = le32_to_cpu(event->event_type);
- memcpy(data, &help, 4);
- for (i = 0; i < 4; i++)
+ for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
seq_putc(m, data[i]);
- len = 0;
-
- len += get_event_name(eventname, event, event_entry);
-
- /* 4th: filename <= 255 + \'0' delimiter */
- if (len > TCG_EVENT_NAME_LEN_MAX)
- len = TCG_EVENT_NAME_LEN_MAX;
-
- for (i = 0; i < len; i++)
- seq_putc(m, eventname[i]);
-
- /* 5th: delimiter */
- seq_putc(m, '\0');
-
- kfree(eventname);
return 0;
}
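
After this rewrite each record in the binary measurements file is the raw tcpa_event header followed by event_size bytes of event data, with no synthesized name field. A hedged userspace sketch of a reader for that layout follows; the header fields are restated here as assumptions (they mirror the kernel's struct tcpa_event) and a little-endian host is assumed, matching the on-disk byte order.

    #include <stdio.h>
    #include <stdint.h>

    struct tcpa_event {             /* assumed layout of the exported record header */
            uint32_t pcr_index;
            uint32_t event_type;
            uint8_t  pcr_value[20]; /* SHA-1 digest */
            uint32_t event_size;    /* bytes of event data that follow */
    } __attribute__((packed));

    int main(int argc, char **argv)
    {
            FILE *f = fopen(argc > 1 ? argv[1] : "binary_bios_measurements", "rb");
            struct tcpa_event ev;

            if (!f)
                    return 1;
            while (fread(&ev, sizeof(ev), 1, f) == 1) {
                    printf("PCR %u, type %u, %u byte(s) of data\n",
                           ev.pcr_index, ev.event_type, ev.event_size);
                    if (fseek(f, ev.event_size, SEEK_CUR) != 0) /* skip event data */
                            break;
            }
            fclose(f);
            return 0;
    }
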
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index acc5d47844eb..6c94879e0b99 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -3238,14 +3238,6 @@ void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org)
}
}
-int is_console_suspend_safe(void)
-{
- /* It is unsafe to suspend devices while X has control of the
- * hardware. Make sure we are running on a kernel-controlled console.
- */
- return vc_cons[fg_console].d->vc_mode == KD_TEXT;
-}
-
/*
* Visible symbols for modules
*/
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 43b96e298363..27c9eb989a9a 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -345,17 +345,17 @@ sgiioc4_resetproc(ide_drive_t * drive)
static u8
sgiioc4_INB(unsigned long port)
{
- u8 reg = (u8) inb(port);
+ u8 reg = (u8) readb((void __iomem *) port);
if ((port & 0xFFF) == 0x11C) { /* Status register of IOC4 */
if (reg & 0x51) { /* Not busy...check for interrupt */
unsigned long other_ir = port - 0x110;
- unsigned int intr_reg = (u32) inl(other_ir);
+ unsigned int intr_reg = (u32) readl((void __iomem *) other_ir);
/* Clear the Interrupt, Error bits on the IOC4 */
if (intr_reg & 0x03) {
- outl(0x03, other_ir);
- intr_reg = (u32) inl(other_ir);
+ writel(0x03, (void __iomem *) other_ir);
+ intr_reg = (u32) readl((void __iomem *) other_ir);
}
}
}
@@ -606,6 +606,12 @@ ide_init_sgiioc4(ide_hwif_t * hwif)
hwif->ide_dma_host_off = &sgiioc4_ide_dma_host_off;
hwif->ide_dma_lostirq = &sgiioc4_ide_dma_lostirq;
hwif->ide_dma_timeout = &__ide_dma_timeout;
+
+ /*
+ * The IOC4 uses MMIO rather than Port IO.
+ * It also needs special workarounds for INB.
+ */
+ default_hwif_mmiops(hwif);
hwif->INB = &sgiioc4_INB;
}
@@ -743,6 +749,6 @@ ioc4_ide_exit(void)
module_init(ioc4_ide_init);
module_exit(ioc4_ide_exit);
-MODULE_AUTHOR("Aniket Malatpure - Silicon Graphics Inc. (SGI)");
+MODULE_AUTHOR("Aniket Malatpure/Jeremy Higdon");
MODULE_DESCRIPTION("IDE PCI driver module for SGI IOC4 Base-IO Card");
MODULE_LICENSE("GPL");
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 8a23fb54c693..5413dc43b9f1 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -845,7 +845,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
&sbp2_highlevel, ud->ne->host, &sbp2_ops,
sizeof(struct sbp2_status_block), sizeof(quadlet_t),
0x010000000000ULL, CSR1212_ALL_SPACE_END);
- if (!scsi_id->status_fifo_addr) {
+ if (scsi_id->status_fifo_addr == ~0ULL) {
SBP2_ERR("failed to allocate status FIFO address range");
goto failed_alloc;
}
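
The check above changes because the address-space allocator signals failure with an all-ones address rather than 0, so the old !status_fifo_addr test could never catch a failed allocation. A tiny hedged sketch of testing against such a sentinel (the allocator and constant below are made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* hypothetical allocator modelled on the sbp2 case: failure is signalled
     * with an all-ones address, not 0 */
    #define INVALID_ADDR    (~0ULL)

    static uint64_t alloc_addr_range(int fail)
    {
            return fail ? INVALID_ADDR : 0x010000000000ULL;
    }

    int main(void)
    {
            uint64_t addr = alloc_addr_range(1);

            if (!addr)                      /* old-style check: never true here */
                    printf("old check: failure detected\n");
            if (addr == INVALID_ADDR)       /* correct check against the sentinel */
                    printf("new check: failure detected\n");
            return 0;
    }
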
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index a54da42849ae..8406839b91cf 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -275,6 +275,7 @@ static void ipoib_ib_handle_wc(struct net_device *dev,
spin_lock_irqsave(&priv->tx_lock, flags);
++priv->tx_tail;
if (netif_queue_stopped(dev) &&
+ test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
netif_wake_queue(dev);
spin_unlock_irqrestore(&priv->tx_lock, flags);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ec802913f977..f19b874753a9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -167,6 +167,15 @@ void md_new_event(mddev_t *mddev)
}
EXPORT_SYMBOL_GPL(md_new_event);
+/* Alternate version that can be called from interrupts
+ * when calling sysfs_notify isn't needed.
+ */
+void md_new_event_inintr(mddev_t *mddev)
+{
+ atomic_inc(&md_event_count);
+ wake_up(&md_event_waiters);
+}
+
/*
* Enables to iterate over all existing md arrays
* all_mddevs_lock protects this list.
@@ -4149,7 +4158,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
- md_new_event(mddev);
+ md_new_event_inintr(mddev);
}
/* seq_file implementation /proc/mdstat */
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 9080853fe283..a30084076ac8 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1605,6 +1605,21 @@ mpt_resume(struct pci_dev *pdev)
}
#endif
+static int
+mpt_signal_reset(int index, MPT_ADAPTER *ioc, int reset_phase)
+{
+ if ((MptDriverClass[index] == MPTSPI_DRIVER &&
+ ioc->bus_type != SPI) ||
+ (MptDriverClass[index] == MPTFC_DRIVER &&
+ ioc->bus_type != FC) ||
+ (MptDriverClass[index] == MPTSAS_DRIVER &&
+ ioc->bus_type != SAS))
+ /* make sure we only call the relevant reset handler
+ * for the bus */
+ return 0;
+ return (MptResetHandlers[index])(ioc, reset_phase);
+}
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mpt_do_ioc_recovery - Initialize or recover MPT adapter.
@@ -1885,14 +1900,14 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
if ((ret == 0) && MptResetHandlers[ii]) {
dprintk((MYIOC_s_INFO_FMT "Calling IOC post_reset handler #%d\n",
ioc->name, ii));
- rc += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_POST_RESET);
+ rc += mpt_signal_reset(ii, ioc, MPT_IOC_POST_RESET);
handlers++;
}
if (alt_ioc_ready && MptResetHandlers[ii]) {
drsprintk((MYIOC_s_INFO_FMT "Calling alt-%s post_reset handler #%d\n",
ioc->name, ioc->alt_ioc->name, ii));
- rc += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_POST_RESET);
+ rc += mpt_signal_reset(ii, ioc->alt_ioc, MPT_IOC_POST_RESET);
handlers++;
}
}
@@ -3267,11 +3282,11 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
if (MptResetHandlers[ii]) {
dprintk((MYIOC_s_INFO_FMT "Calling IOC pre_reset handler #%d\n",
ioc->name, ii));
- r += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_PRE_RESET);
+ r += mpt_signal_reset(ii, ioc, MPT_IOC_PRE_RESET);
if (ioc->alt_ioc) {
dprintk((MYIOC_s_INFO_FMT "Calling alt-%s pre_reset handler #%d\n",
ioc->name, ioc->alt_ioc->name, ii));
- r += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_PRE_RESET);
+ r += mpt_signal_reset(ii, ioc->alt_ioc, MPT_IOC_PRE_RESET);
}
}
}
@@ -5706,11 +5721,11 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
if (MptResetHandlers[ii]) {
dtmprintk((MYIOC_s_INFO_FMT "Calling IOC reset_setup handler #%d\n",
ioc->name, ii));
- r += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_SETUP_RESET);
+ r += mpt_signal_reset(ii, ioc, MPT_IOC_SETUP_RESET);
if (ioc->alt_ioc) {
dtmprintk((MYIOC_s_INFO_FMT "Calling alt-%s setup reset handler #%d\n",
ioc->name, ioc->alt_ioc->name, ii));
- r += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_SETUP_RESET);
+ r += mpt_signal_reset(ii, ioc->alt_ioc, MPT_IOC_SETUP_RESET);
}
}
}
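
mpt_signal_reset() introduced above is a small dispatch filter: before forwarding a reset notification it compares the registered driver class for that handler slot with the adapter's bus type and silently skips mismatches, so SPI/FC/SAS handlers only see resets for their own kind of adapter. A hedged table-driven sketch of the same filter-then-dispatch idea (the enum, table and handler below are illustrative, not the Fusion MPT structures):

    #include <stdio.h>

    enum bus_type { BUS_SPI, BUS_FC, BUS_SAS };

    struct adapter {
            enum bus_type bus_type;
    };

    typedef int (*reset_handler_t)(struct adapter *ioc, int phase);

    static int spi_reset(struct adapter *ioc, int phase)
    {
            printf("SPI reset handler, phase %d\n", phase);
            return 1;
    }

    /* one handler slot per driver class; the index doubles as the class id */
    static const enum bus_type handler_bus[] = { BUS_SPI, BUS_FC, BUS_SAS };
    static reset_handler_t handlers[] = { spi_reset, NULL, NULL };

    static int signal_reset(int index, struct adapter *ioc, int phase)
    {
            if (!handlers[index])
                    return 0;
            if (handler_bus[index] != ioc->bus_type)
                    return 0;       /* only call the handler matching this bus */
            return handlers[index](ioc, phase);
    }

    int main(void)
    {
            struct adapter ioc = { .bus_type = BUS_SPI };

            for (int i = 0; i < 3; i++)
                    signal_reset(i, &ioc, /* phase */ 0);
            return 0;
    }
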
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index f2a4d382ea19..3201de053943 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -831,6 +831,7 @@ mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
return rc;
}
+#ifdef CONFIG_PM
/*
* spi module resume handler
*/
@@ -846,6 +847,7 @@ mptspi_resume(struct pci_dev *pdev)
return rc;
}
+#endif
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 5ea133c59afb..7bd4d85d0b42 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -55,6 +55,7 @@ struct i2o_exec_wait {
u32 m; /* message id */
struct i2o_message *msg; /* pointer to the reply message */
struct list_head list; /* node in global wait list */
+ spinlock_t lock; /* lock before modifying */
};
/* Work struct needed to handle LCT NOTIFY replies */
@@ -87,6 +88,7 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
return NULL;
INIT_LIST_HEAD(&wait->list);
+ spin_lock_init(&wait->lock);
return wait;
};
@@ -125,6 +127,7 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
DECLARE_WAIT_QUEUE_HEAD(wq);
struct i2o_exec_wait *wait;
static u32 tcntxt = 0x80000000;
+ unsigned long flags;
int rc = 0;
wait = i2o_exec_wait_alloc();
@@ -146,33 +149,28 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
wait->tcntxt = tcntxt++;
msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);
+ wait->wq = &wq;
+ /*
+ * we add elements to the head, because if a entry in the list will
+ * never be removed, we have to iterate over it every time
+ */
+ list_add(&wait->list, &i2o_exec_wait_list);
+
/*
* Post the message to the controller. At some point later it will
* return. If we time out before it returns then complete will be zero.
*/
i2o_msg_post(c, msg);
- if (!wait->complete) {
- wait->wq = &wq;
- /*
- * we add elements add the head, because if a entry in the list
- * will never be removed, we have to iterate over it every time
- */
- list_add(&wait->list, &i2o_exec_wait_list);
-
- wait_event_interruptible_timeout(wq, wait->complete,
- timeout * HZ);
+ wait_event_interruptible_timeout(wq, wait->complete, timeout * HZ);
- wait->wq = NULL;
- }
+ spin_lock_irqsave(&wait->lock, flags);
- barrier();
+ wait->wq = NULL;
- if (wait->complete) {
+ if (wait->complete)
rc = le32_to_cpu(wait->msg->body[0]) >> 24;
- i2o_flush_reply(c, wait->m);
- i2o_exec_wait_free(wait);
- } else {
+ else {
/*
* We cannot remove it now. This is important. When it does
* terminate (which it must do if the controller has not
@@ -186,6 +184,13 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
rc = -ETIMEDOUT;
}
+ spin_unlock_irqrestore(&wait->lock, flags);
+
+ if (rc != -ETIMEDOUT) {
+ i2o_flush_reply(c, wait->m);
+ i2o_exec_wait_free(wait);
+ }
+
return rc;
};
@@ -213,7 +218,6 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
{
struct i2o_exec_wait *wait, *tmp;
unsigned long flags;
- static spinlock_t lock = SPIN_LOCK_UNLOCKED;
int rc = 1;
/*
@@ -223,23 +227,24 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
* already expired. Not much we can do about that except log it for
* debug purposes, increase timeout, and recompile.
*/
- spin_lock_irqsave(&lock, flags);
list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) {
if (wait->tcntxt == context) {
- list_del(&wait->list);
+ spin_lock_irqsave(&wait->lock, flags);
- spin_unlock_irqrestore(&lock, flags);
+ list_del(&wait->list);
wait->m = m;
wait->msg = msg;
wait->complete = 1;
- barrier();
-
- if (wait->wq) {
- wake_up_interruptible(wait->wq);
+ if (wait->wq)
rc = 0;
- } else {
+ else
+ rc = -1;
+
+ spin_unlock_irqrestore(&wait->lock, flags);
+
+ if (rc) {
struct device *dev;
dev = &c->pdev->dev;
@@ -248,15 +253,13 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
c->name);
i2o_dma_free(dev, &wait->dma);
i2o_exec_wait_free(wait);
- rc = -1;
- }
+ } else
+ wake_up_interruptible(wait->wq);
return rc;
}
}
- spin_unlock_irqrestore(&lock, flags);
-
osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name,
context);
@@ -322,14 +325,9 @@ static DEVICE_ATTR(product_id, S_IRUGO, i2o_exec_show_product_id, NULL);
static int i2o_exec_probe(struct device *dev)
{
struct i2o_device *i2o_dev = to_i2o_device(dev);
- struct i2o_controller *c = i2o_dev->iop;
i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff);
- c->exec = i2o_dev;
-
- i2o_exec_lct_notify(c, c->lct->change_ind + 1);
-
device_create_file(dev, &dev_attr_vendor_id);
device_create_file(dev, &dev_attr_product_id);
@@ -523,6 +521,8 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
struct device *dev;
struct i2o_message *msg;
+ down(&c->lct_lock);
+
dev = &c->pdev->dev;
if (i2o_dma_realloc
@@ -545,6 +545,8 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
i2o_msg_post(c, msg);
+ up(&c->lct_lock);
+
return 0;
};
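
The exec-osm changes above replace a single global lock with a per-request spinlock so that the waiter and the reply handler can agree, under that lock, on who owns the request: if the reply arrives in time the waiter frees it, and if the waiter has already timed out the reply path does the cleanup instead. A hedged pthreads sketch of that ownership handoff (the request layout, the sleeps standing in for a timeout, and the helper names are all illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    struct wait_req {
            pthread_mutex_t lock;
            int complete;           /* set by the completer under the lock */
            int abandoned;          /* set by the waiter on timeout, under the lock */
    };

    static void *completer(void *arg)
    {
            struct wait_req *w = arg;
            int cleanup;

            sleep(1);                       /* the "reply" arrives later */
            pthread_mutex_lock(&w->lock);
            w->complete = 1;
            cleanup = w->abandoned;         /* did the waiter give up already? */
            pthread_mutex_unlock(&w->lock);

            if (cleanup) {
                    printf("completer: waiter timed out earlier, freeing request\n");
                    free(w);
            }
            return NULL;
    }

    int main(void)
    {
            struct wait_req *w = calloc(1, sizeof(*w));
            pthread_t tid;
            int done;

            if (!w)
                    return 1;
            pthread_mutex_init(&w->lock, NULL);
            pthread_create(&tid, NULL, completer, w);

            sleep(2);                       /* pretend to wait for the completion */

            pthread_mutex_lock(&w->lock);
            done = w->complete;
            if (!done)
                    w->abandoned = 1;       /* hand cleanup over to the completer */
            pthread_mutex_unlock(&w->lock);

            if (done) {
                    printf("waiter: reply arrived, freeing request\n");
                    pthread_join(tid, NULL);
                    free(w);
            } else {
                    printf("waiter: timed out, completer now owns the request\n");
                    pthread_detach(tid);
            }
            return 0;
    }
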
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 492167446936..febbdd4e0605 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -804,8 +804,6 @@ void i2o_iop_remove(struct i2o_controller *c)
/* Ask the IOP to switch to RESET state */
i2o_iop_reset(c);
-
- put_device(&c->device);
}
/**
@@ -1059,7 +1057,7 @@ struct i2o_controller *i2o_iop_alloc(void)
snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
if (i2o_pool_alloc
- (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4,
+ (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4 + sizeof(u32),
I2O_MSG_INPOOL_MIN)) {
kfree(c);
return ERR_PTR(-ENOMEM);
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index 003b077c2324..45bcf098e762 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -84,7 +84,7 @@ config MMC_WBSD
config MMC_AU1X
tristate "Alchemy AU1XX0 MMC Card Interface support"
- depends on SOC_AU1X00 && MMC
+ depends on MMC && SOC_AU1200
help
This selects the AMD Alchemy(R) Multimedia card interface.
If you have a Alchemy platform with a MMC slot, say Y or M here.
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index f6d51ce34b00..bb44509fd404 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -909,7 +909,7 @@ MODULE_PARM_DESC(irq, "EtherLink IRQ number");
* here also causes the module to be unloaded
*/
-int init_module(void)
+int __init init_module(void)
{
dev_3c501 = el1_probe(-1);
if (IS_ERR(dev_3c501))
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index dcc98afa65d7..cb5ef75450dc 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -688,7 +688,7 @@ MODULE_LICENSE("GPL");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
-int
+int __init
init_module(void)
{
struct net_device *dev;
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 111601ca4ca3..19c0b856c488 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -1633,7 +1633,7 @@ MODULE_PARM_DESC(io, "EtherLink Plus I/O base address(es)");
MODULE_PARM_DESC(irq, "EtherLink Plus IRQ number(s) (assigned)");
MODULE_PARM_DESC(dma, "EtherLink Plus DMA channel(s)");
-int init_module(void)
+int __init init_module(void)
{
int this_dev, found = 0;
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index 4db82893909c..6039049259ed 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -932,7 +932,7 @@ module_param(irq, int, 0);
MODULE_PARM_DESC(io, "EtherLink16 I/O base address");
MODULE_PARM_DESC(irq, "(ignored)");
-int init_module(void)
+int __init init_module(void)
{
if (io == 0)
printk("3c507: You should not use auto-probing with insmod!\n");
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index b40885d41680..4bf8510655c5 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -1277,7 +1277,7 @@ MODULE_PARM_DESC(io, "EtherLink/MC I/O base address(es)");
MODULE_PARM_DESC(irq, "EtherLink/MC IRQ number(s)");
MODULE_LICENSE("GPL");
-int init_module(void)
+int __init init_module(void)
{
int this_dev,found = 0;
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 6db3301e7965..1b1cb0026072 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1646,7 +1646,7 @@ static struct net_device *this_device;
* insmod multiple modules for now but it's a hack.
*/
-int init_module(void)
+int __init init_module(void)
{
this_device = mc32_probe(-1);
if (IS_ERR(this_device))
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 066e22b01a94..46d8c01437e9 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -19,11 +19,11 @@
See the file COPYING in this distribution for more information.
Contributors:
-
+
Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br>
LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br>
-
+
TODO:
* Test Tx checksumming thoroughly
* Implement dev->tx_timeout
@@ -461,7 +461,7 @@ static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
static inline void cp_set_rxbufsize (struct cp_private *cp)
{
unsigned int mtu = cp->dev->mtu;
-
+
if (mtu > ETH_DATA_LEN)
/* MTU + ethernet header + FCS + optional VLAN tag */
cp->rx_buf_sz = mtu + ETH_HLEN + 8;
@@ -510,7 +510,7 @@ static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
static inline unsigned int cp_rx_csum_ok (u32 status)
{
unsigned int protocol = (status >> 16) & 0x3;
-
+
if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
return 1;
else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
@@ -1061,7 +1061,7 @@ static void cp_init_hw (struct cp_private *cp)
cpw8(Config3, PARMEnable);
cp->wol_enabled = 0;
- cpw8(Config5, cpr8(Config5) & PMEStatus);
+ cpw8(Config5, cpr8(Config5) & PMEStatus);
cpw32_f(HiTxRingAddr, 0);
cpw32_f(HiTxRingAddr + 4, 0);
@@ -1351,7 +1351,7 @@ static void netdev_get_wol (struct cp_private *cp,
WAKE_MCAST | WAKE_UCAST;
/* We don't need to go on if WOL is disabled */
if (!cp->wol_enabled) return;
-
+
options = cpr8 (Config3);
if (options & LinkUp) wol->wolopts |= WAKE_PHY;
if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
@@ -1919,7 +1919,7 @@ static int cp_resume (struct pci_dev *pdev)
mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
spin_unlock_irqrestore (&cp->lock, flags);
-
+
return 0;
}
#endif /* CONFIG_PM */
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index feae7832fc84..abd6261465f1 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -165,7 +165,7 @@ static int multicast_filter_limit = 32;
static int debug = -1;
/*
- * Receive ring size
+ * Receive ring size
* Warning: 64K ring has hardware issues and may lock up.
*/
#if defined(CONFIG_SH_DREAMCAST)
@@ -257,7 +257,7 @@ static struct pci_device_id rtl8139_pci_tbl[] = {
{0x018a, 0x0106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
- {0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
#ifdef CONFIG_SH_SECUREEDGE5410
/* Bogus 8139 silicon reports 8129 without external PROM :-( */
@@ -1824,7 +1824,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
int tmp_work;
#endif
- if (netif_msg_rx_err (tp))
+ if (netif_msg_rx_err (tp))
printk(KERN_DEBUG "%s: Ethernet frame had errors, status %8.8x.\n",
dev->name, rx_status);
tp->stats.rx_errors++;
@@ -1944,7 +1944,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
RTL_R16 (RxBufAddr),
RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
- while (netif_running(dev) && received < budget
+ while (netif_running(dev) && received < budget
&& (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
u32 ring_offset = cur_rx % RX_BUF_LEN;
u32 rx_status;
@@ -2031,7 +2031,7 @@ no_early_rx:
netif_receive_skb (skb);
} else {
- if (net_ratelimit())
+ if (net_ratelimit())
printk (KERN_WARNING
"%s: Memory squeeze, dropping packet.\n",
dev->name);
@@ -2158,13 +2158,13 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance,
status = RTL_R16 (IntrStatus);
/* shared irq? */
- if (unlikely((status & rtl8139_intr_mask) == 0))
+ if (unlikely((status & rtl8139_intr_mask) == 0))
goto out;
handled = 1;
/* h/w no longer present (hotplug?) or major error, bail */
- if (unlikely(status == 0xFFFF))
+ if (unlikely(status == 0xFFFF))
goto out;
/* close possible race's with dev_close */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index bdaaad8f2123..20bdb9732a09 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -447,6 +447,7 @@ config MIPS_GT96100ETH
config MIPS_AU1X00_ENET
bool "MIPS AU1000 Ethernet support"
depends on NET_ETHERNET && SOC_AU1X00
+ select PHYLIB
select CRC32
help
If you have an Alchemy Semi AU1X00 based system
@@ -865,6 +866,22 @@ config DM9000
<file:Documentation/networking/net-modules.txt>. The module will be
called dm9000.
+config SMC911X
+ tristate "SMSC LAN911[5678] support"
+ select CRC32
+ select MII
+ depends on NET_ETHERNET && ARCH_PXA
+ help
+ This is a driver for SMSC's LAN911x series of Ethernet chipsets
+ including the new LAN9115, LAN9116, LAN9117, and LAN9118.
+ Say Y if you want it compiled into the kernel,
+ and read the Ethernet-HOWTO, available from
+ <http://www.linuxdoc.org/docs.html#howto>.
+
+ This driver is also available as a module. The module will be
+ called smc911x. If you want to compile it as a module, say M
+ here and read <file:Documentation/modules.txt>
+
config NET_VENDOR_RACAL
bool "Racal-Interlan (Micom) NI cards"
depends on NET_ETHERNET && ISA
@@ -2311,6 +2328,23 @@ config S2IO_NAPI
If in doubt, say N.
+config MYRI10GE
+ tristate "Myricom Myri-10G Ethernet support"
+ depends on PCI
+ select FW_LOADER
+ select CRC32
+ ---help---
+ This driver supports Myricom Myri-10G Dual Protocol interface in
+ Ethernet mode. If the eeprom on your board is not recent enough,
+ you will need a newer firmware image.
+ You may get this image or more information at:
+
+ <http://www.myri.com/Myri-10G/>
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called myri10ge.
+
endmenu
source "drivers/net/tokenring/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b90468aea077..1eced3287507 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -192,7 +192,9 @@ obj-$(CONFIG_R8169) += r8169.o
obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
obj-$(CONFIG_IBMVETH) += ibmveth.o
obj-$(CONFIG_S2IO) += s2io.o
+obj-$(CONFIG_MYRI10GE) += myri10ge/
obj-$(CONFIG_SMC91X) += smc91x.o
+obj-$(CONFIG_SMC911X) += smc911x.o
obj-$(CONFIG_DM9000) += dm9000.o
obj-$(CONFIG_FEC_8XX) += fec_8xx/
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 14dbad14afb6..038d5fcb15e6 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -2,13 +2,16 @@
*
* Alchemy Au1x00 ethernet driver
*
- * Copyright 2001,2002,2003 MontaVista Software Inc.
+ * Copyright 2001-2003, 2006 MontaVista Software Inc.
* Copyright 2002 TimeSys Corp.
* Added ethtool/mii-tool support,
* Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
* Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
* or riemer@riemer-nt.de: fixed the link beat detection with
* ioctls (SIOCGMIIPHY)
+ * Copyright 2006 Herbert Valerio Riedel <hvr@gnu.org>
+ * converted to use linux-2.6.x's PHY framework
+ *
* Author: MontaVista Software, Inc.
* ppopov@mvista.com or source@mvista.com
*
@@ -53,6 +56,7 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/crc32.h>
+#include <linux/phy.h>
#include <asm/mipsregs.h>
#include <asm/irq.h>
#include <asm/io.h>
@@ -68,7 +72,7 @@ static int au1000_debug = 5;
static int au1000_debug = 3;
#endif
-#define DRV_NAME "au1000eth"
+#define DRV_NAME "au1000_eth"
#define DRV_VERSION "1.5"
#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
#define DRV_DESC "Au1xxx on-chip Ethernet driver"
@@ -80,7 +84,7 @@ MODULE_LICENSE("GPL");
// prototypes
static void hard_stop(struct net_device *);
static void enable_rx_tx(struct net_device *dev);
-static struct net_device * au1000_probe(u32 ioaddr, int irq, int port_num);
+static struct net_device * au1000_probe(int port_num);
static int au1000_init(struct net_device *);
static int au1000_open(struct net_device *);
static int au1000_close(struct net_device *);
@@ -88,17 +92,15 @@ static int au1000_tx(struct sk_buff *, struct net_device *);
static int au1000_rx(struct net_device *);
static irqreturn_t au1000_interrupt(int, void *, struct pt_regs *);
static void au1000_tx_timeout(struct net_device *);
-static int au1000_set_config(struct net_device *dev, struct ifmap *map);
static void set_rx_mode(struct net_device *);
static struct net_device_stats *au1000_get_stats(struct net_device *);
-static void au1000_timer(unsigned long);
static int au1000_ioctl(struct net_device *, struct ifreq *, int);
static int mdio_read(struct net_device *, int, int);
static void mdio_write(struct net_device *, int, int, u16);
-static void dump_mii(struct net_device *dev, int phy_id);
+static void au1000_adjust_link(struct net_device *);
+static void enable_mac(struct net_device *, int);
// externs
-extern void ack_rise_edge_irq(unsigned int);
extern int get_ethernet_addr(char *ethernet_addr);
extern void str2eaddr(unsigned char *ea, unsigned char *str);
extern char * __init prom_getcmdline(void);
@@ -126,705 +128,83 @@ static unsigned char au1000_mac_addr[6] __devinitdata = {
0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00
};
-#define nibswap(x) ((((x) >> 4) & 0x0f) | (((x) << 4) & 0xf0))
-#define RUN_AT(x) (jiffies + (x))
-
-// For reading/writing 32-bit words from/to DMA memory
-#define cpu_to_dma32 cpu_to_be32
-#define dma32_to_cpu be32_to_cpu
-
struct au1000_private *au_macs[NUM_ETH_INTERFACES];
-/* FIXME
- * All of the PHY code really should be detached from the MAC
- * code.
+/*
+ * board-specific configurations
+ *
+ * PHY detection algorithm
+ *
+ * If AU1XXX_PHY_STATIC_CONFIG is undefined, the PHY setup is
+ * autodetected:
+ *
+ * mii_probe() first searches the current MAC's MII bus for a PHY,
+ * selecting the first (or last, if AU1XXX_PHY_SEARCH_HIGHEST_ADDR is
+ * defined) PHY address not already claimed by another netdev.
+ *
+ * If nothing was found that way when searching for the 2nd ethernet
+ * controller's PHY and AU1XXX_PHY1_SEARCH_ON_MAC0 is defined, then
+ * the first MII bus is searched as well for an unclaimed PHY; this is
+ * needed in case of a dual-PHY accessible only through the MAC0's MII
+ * bus.
+ *
+ * Finally, if no PHY is found, then the corresponding ethernet
+ * controller is not registered to the network subsystem.
*/
-/* Default advertise */
-#define GENMII_DEFAULT_ADVERTISE \
- ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
- ADVERTISED_Autoneg
-
-#define GENMII_DEFAULT_FEATURES \
- SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
- SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
- SUPPORTED_Autoneg
-
-int bcm_5201_init(struct net_device *dev, int phy_addr)
-{
- s16 data;
-
- /* Stop auto-negotiation */
- data = mdio_read(dev, phy_addr, MII_CONTROL);
- mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
-
- /* Set advertisement to 10/100 and Half/Full duplex
- * (full capabilities) */
- data = mdio_read(dev, phy_addr, MII_ANADV);
- data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
- mdio_write(dev, phy_addr, MII_ANADV, data);
-
- /* Restart auto-negotiation */
- data = mdio_read(dev, phy_addr, MII_CONTROL);
- data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
- mdio_write(dev, phy_addr, MII_CONTROL, data);
-
- if (au1000_debug > 4)
- dump_mii(dev, phy_addr);
- return 0;
-}
-
-int bcm_5201_reset(struct net_device *dev, int phy_addr)
-{
- s16 mii_control, timeout;
-
- mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
- mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
- mdelay(1);
- for (timeout = 100; timeout > 0; --timeout) {
- mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
- if ((mii_control & MII_CNTL_RESET) == 0)
- break;
- mdelay(1);
- }
- if (mii_control & MII_CNTL_RESET) {
- printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
- return -1;
- }
- return 0;
-}
-
-int
-bcm_5201_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
-{
- u16 mii_data;
- struct au1000_private *aup;
-
- if (!dev) {
- printk(KERN_ERR "bcm_5201_status error: NULL dev\n");
- return -1;
- }
- aup = (struct au1000_private *) dev->priv;
-
- mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
- if (mii_data & MII_STAT_LINK) {
- *link = 1;
- mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
- if (mii_data & MII_AUX_100) {
- if (mii_data & MII_AUX_FDX) {
- *speed = IF_PORT_100BASEFX;
- dev->if_port = IF_PORT_100BASEFX;
- }
- else {
- *speed = IF_PORT_100BASETX;
- dev->if_port = IF_PORT_100BASETX;
- }
- }
- else {
- *speed = IF_PORT_10BASET;
- dev->if_port = IF_PORT_10BASET;
- }
-
- }
- else {
- *link = 0;
- *speed = 0;
- dev->if_port = IF_PORT_UNKNOWN;
- }
- return 0;
-}
-
-int lsi_80227_init(struct net_device *dev, int phy_addr)
-{
- if (au1000_debug > 4)
- printk("lsi_80227_init\n");
-
- /* restart auto-negotiation */
- mdio_write(dev, phy_addr, MII_CONTROL,
- MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO); // | MII_CNTL_FDX);
- mdelay(1);
-
- /* set up LEDs to correct display */
-#ifdef CONFIG_MIPS_MTX1
- mdio_write(dev, phy_addr, 17, 0xff80);
-#else
- mdio_write(dev, phy_addr, 17, 0xffc0);
-#endif
-
- if (au1000_debug > 4)
- dump_mii(dev, phy_addr);
- return 0;
-}
-
-int lsi_80227_reset(struct net_device *dev, int phy_addr)
-{
- s16 mii_control, timeout;
-
- if (au1000_debug > 4) {
- printk("lsi_80227_reset\n");
- dump_mii(dev, phy_addr);
- }
-
- mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
- mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
- mdelay(1);
- for (timeout = 100; timeout > 0; --timeout) {
- mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
- if ((mii_control & MII_CNTL_RESET) == 0)
- break;
- mdelay(1);
- }
- if (mii_control & MII_CNTL_RESET) {
- printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
- return -1;
- }
- return 0;
-}
-
-int
-lsi_80227_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
-{
- u16 mii_data;
- struct au1000_private *aup;
-
- if (!dev) {
- printk(KERN_ERR "lsi_80227_status error: NULL dev\n");
- return -1;
- }
- aup = (struct au1000_private *) dev->priv;
-
- mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
- if (mii_data & MII_STAT_LINK) {
- *link = 1;
- mii_data = mdio_read(dev, aup->phy_addr, MII_LSI_PHY_STAT);
- if (mii_data & MII_LSI_PHY_STAT_SPD) {
- if (mii_data & MII_LSI_PHY_STAT_FDX) {
- *speed = IF_PORT_100BASEFX;
- dev->if_port = IF_PORT_100BASEFX;
- }
- else {
- *speed = IF_PORT_100BASETX;
- dev->if_port = IF_PORT_100BASETX;
- }
- }
- else {
- *speed = IF_PORT_10BASET;
- dev->if_port = IF_PORT_10BASET;
- }
-
- }
- else {
- *link = 0;
- *speed = 0;
- dev->if_port = IF_PORT_UNKNOWN;
- }
- return 0;
-}
-
-int am79c901_init(struct net_device *dev, int phy_addr)
-{
- printk("am79c901_init\n");
- return 0;
-}
-
-int am79c901_reset(struct net_device *dev, int phy_addr)
-{
- printk("am79c901_reset\n");
- return 0;
-}
-
-int
-am79c901_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
-{
- return 0;
-}
-
-int am79c874_init(struct net_device *dev, int phy_addr)
-{
- s16 data;
-
- /* 79c874 has quit resembled bit assignments to BCM5201 */
- if (au1000_debug > 4)
- printk("am79c847_init\n");
-
- /* Stop auto-negotiation */
- data = mdio_read(dev, phy_addr, MII_CONTROL);
- mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
-
- /* Set advertisement to 10/100 and Half/Full duplex
- * (full capabilities) */
- data = mdio_read(dev, phy_addr, MII_ANADV);
- data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
- mdio_write(dev, phy_addr, MII_ANADV, data);
-
- /* Restart auto-negotiation */
- data = mdio_read(dev, phy_addr, MII_CONTROL);
- data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
-
- mdio_write(dev, phy_addr, MII_CONTROL, data);
-
- if (au1000_debug > 4) dump_mii(dev, phy_addr);
- return 0;
-}
-
-int am79c874_reset(struct net_device *dev, int phy_addr)
-{
- s16 mii_control, timeout;
-
- if (au1000_debug > 4)
- printk("am79c874_reset\n");
-
- mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
- mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
- mdelay(1);
- for (timeout = 100; timeout > 0; --timeout) {
- mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
- if ((mii_control & MII_CNTL_RESET) == 0)
- break;
- mdelay(1);
- }
- if (mii_control & MII_CNTL_RESET) {
- printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
- return -1;
- }
- return 0;
-}
-
-int
-am79c874_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
-{
- u16 mii_data;
- struct au1000_private *aup;
-
- // printk("am79c874_status\n");
- if (!dev) {
- printk(KERN_ERR "am79c874_status error: NULL dev\n");
- return -1;
- }
-
- aup = (struct au1000_private *) dev->priv;
- mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
-
- if (mii_data & MII_STAT_LINK) {
- *link = 1;
- mii_data = mdio_read(dev, aup->phy_addr, MII_AMD_PHY_STAT);
- if (mii_data & MII_AMD_PHY_STAT_SPD) {
- if (mii_data & MII_AMD_PHY_STAT_FDX) {
- *speed = IF_PORT_100BASEFX;
- dev->if_port = IF_PORT_100BASEFX;
- }
- else {
- *speed = IF_PORT_100BASETX;
- dev->if_port = IF_PORT_100BASETX;
- }
- }
- else {
- *speed = IF_PORT_10BASET;
- dev->if_port = IF_PORT_10BASET;
- }
-
- }
- else {
- *link = 0;
- *speed = 0;
- dev->if_port = IF_PORT_UNKNOWN;
- }
- return 0;
-}
-
-int lxt971a_init(struct net_device *dev, int phy_addr)
-{
- if (au1000_debug > 4)
- printk("lxt971a_init\n");
-
- /* restart auto-negotiation */
- mdio_write(dev, phy_addr, MII_CONTROL,
- MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO | MII_CNTL_FDX);
-
- /* set up LEDs to correct display */
- mdio_write(dev, phy_addr, 20, 0x0422);
-
- if (au1000_debug > 4)
- dump_mii(dev, phy_addr);
- return 0;
-}
-
-int lxt971a_reset(struct net_device *dev, int phy_addr)
-{
- s16 mii_control, timeout;
-
- if (au1000_debug > 4) {
- printk("lxt971a_reset\n");
- dump_mii(dev, phy_addr);
- }
-
- mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
- mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
- mdelay(1);
- for (timeout = 100; timeout > 0; --timeout) {
- mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
- if ((mii_control & MII_CNTL_RESET) == 0)
- break;
- mdelay(1);
- }
- if (mii_control & MII_CNTL_RESET) {
- printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
- return -1;
- }
- return 0;
-}
-
-int
-lxt971a_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
-{
- u16 mii_data;
- struct au1000_private *aup;
-
- if (!dev) {
- printk(KERN_ERR "lxt971a_status error: NULL dev\n");
- return -1;
- }
- aup = (struct au1000_private *) dev->priv;
-
- mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
- if (mii_data & MII_STAT_LINK) {
- *link = 1;
- mii_data = mdio_read(dev, aup->phy_addr, MII_INTEL_PHY_STAT);
- if (mii_data & MII_INTEL_PHY_STAT_SPD) {
- if (mii_data & MII_INTEL_PHY_STAT_FDX) {
- *speed = IF_PORT_100BASEFX;
- dev->if_port = IF_PORT_100BASEFX;
- }
- else {
- *speed = IF_PORT_100BASETX;
- dev->if_port = IF_PORT_100BASETX;
- }
- }
- else {
- *speed = IF_PORT_10BASET;
- dev->if_port = IF_PORT_10BASET;
- }
-
- }
- else {
- *link = 0;
- *speed = 0;
- dev->if_port = IF_PORT_UNKNOWN;
- }
- return 0;
-}
-
-int ks8995m_init(struct net_device *dev, int phy_addr)
-{
- s16 data;
-
-// printk("ks8995m_init\n");
- /* Stop auto-negotiation */
- data = mdio_read(dev, phy_addr, MII_CONTROL);
- mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
-
- /* Set advertisement to 10/100 and Half/Full duplex
- * (full capabilities) */
- data = mdio_read(dev, phy_addr, MII_ANADV);
- data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
- mdio_write(dev, phy_addr, MII_ANADV, data);
-
- /* Restart auto-negotiation */
- data = mdio_read(dev, phy_addr, MII_CONTROL);
- data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
- mdio_write(dev, phy_addr, MII_CONTROL, data);
-
- if (au1000_debug > 4) dump_mii(dev, phy_addr);
-
- return 0;
-}
-
-int ks8995m_reset(struct net_device *dev, int phy_addr)
-{
- s16 mii_control, timeout;
-
-// printk("ks8995m_reset\n");
- mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
- mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
- mdelay(1);
- for (timeout = 100; timeout > 0; --timeout) {
- mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
- if ((mii_control & MII_CNTL_RESET) == 0)
- break;
- mdelay(1);
- }
- if (mii_control & MII_CNTL_RESET) {
- printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
- return -1;
- }
- return 0;
-}
-
-int ks8995m_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
-{
- u16 mii_data;
- struct au1000_private *aup;
-
- if (!dev) {
- printk(KERN_ERR "ks8995m_status error: NULL dev\n");
- return -1;
- }
- aup = (struct au1000_private *) dev->priv;
-
- mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
- if (mii_data & MII_STAT_LINK) {
- *link = 1;
- mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
- if (mii_data & MII_AUX_100) {
- if (mii_data & MII_AUX_FDX) {
- *speed = IF_PORT_100BASEFX;
- dev->if_port = IF_PORT_100BASEFX;
- }
- else {
- *speed = IF_PORT_100BASETX;
- dev->if_port = IF_PORT_100BASETX;
- }
- }
- else {
- *speed = IF_PORT_10BASET;
- dev->if_port = IF_PORT_10BASET;
- }
-
- }
- else {
- *link = 0;
- *speed = 0;
- dev->if_port = IF_PORT_UNKNOWN;
- }
- return 0;
-}
-
-int
-smsc_83C185_init (struct net_device *dev, int phy_addr)
-{
- s16 data;
-
- if (au1000_debug > 4)
- printk("smsc_83C185_init\n");
-
- /* Stop auto-negotiation */
- data = mdio_read(dev, phy_addr, MII_CONTROL);
- mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
-
- /* Set advertisement to 10/100 and Half/Full duplex
- * (full capabilities) */
- data = mdio_read(dev, phy_addr, MII_ANADV);
- data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
- mdio_write(dev, phy_addr, MII_ANADV, data);
-
- /* Restart auto-negotiation */
- data = mdio_read(dev, phy_addr, MII_CONTROL);
- data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
-
- mdio_write(dev, phy_addr, MII_CONTROL, data);
-
- if (au1000_debug > 4) dump_mii(dev, phy_addr);
- return 0;
-}
-
-int
-smsc_83C185_reset (struct net_device *dev, int phy_addr)
-{
- s16 mii_control, timeout;
-
- if (au1000_debug > 4)
- printk("smsc_83C185_reset\n");
-
- mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
- mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
- mdelay(1);
- for (timeout = 100; timeout > 0; --timeout) {
- mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
- if ((mii_control & MII_CNTL_RESET) == 0)
- break;
- mdelay(1);
- }
- if (mii_control & MII_CNTL_RESET) {
- printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
- return -1;
- }
- return 0;
-}
-
-int
-smsc_83C185_status (struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
-{
- u16 mii_data;
- struct au1000_private *aup;
-
- if (!dev) {
- printk(KERN_ERR "smsc_83C185_status error: NULL dev\n");
- return -1;
- }
-
- aup = (struct au1000_private *) dev->priv;
- mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
-
- if (mii_data & MII_STAT_LINK) {
- *link = 1;
- mii_data = mdio_read(dev, aup->phy_addr, 0x1f);
- if (mii_data & (1<<3)) {
- if (mii_data & (1<<4)) {
- *speed = IF_PORT_100BASEFX;
- dev->if_port = IF_PORT_100BASEFX;
- }
- else {
- *speed = IF_PORT_100BASETX;
- dev->if_port = IF_PORT_100BASETX;
- }
- }
- else {
- *speed = IF_PORT_10BASET;
- dev->if_port = IF_PORT_10BASET;
- }
- }
- else {
- *link = 0;
- *speed = 0;
- dev->if_port = IF_PORT_UNKNOWN;
- }
- return 0;
-}
-
-
-#ifdef CONFIG_MIPS_BOSPORUS
-int stub_init(struct net_device *dev, int phy_addr)
-{
- //printk("PHY stub_init\n");
- return 0;
-}
-
-int stub_reset(struct net_device *dev, int phy_addr)
-{
- //printk("PHY stub_reset\n");
- return 0;
-}
-
-int
-stub_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
-{
- //printk("PHY stub_status\n");
- *link = 1;
- /* hmmm, revisit */
- *speed = IF_PORT_100BASEFX;
- dev->if_port = IF_PORT_100BASEFX;
- return 0;
-}
-#endif
-
-struct phy_ops bcm_5201_ops = {
- bcm_5201_init,
- bcm_5201_reset,
- bcm_5201_status,
-};
-
-struct phy_ops am79c874_ops = {
- am79c874_init,
- am79c874_reset,
- am79c874_status,
-};
-
-struct phy_ops am79c901_ops = {
- am79c901_init,
- am79c901_reset,
- am79c901_status,
-};
-
-struct phy_ops lsi_80227_ops = {
- lsi_80227_init,
- lsi_80227_reset,
- lsi_80227_status,
-};
+/* autodetection defaults */
+#undef AU1XXX_PHY_SEARCH_HIGHEST_ADDR
+#define AU1XXX_PHY1_SEARCH_ON_MAC0
-struct phy_ops lxt971a_ops = {
- lxt971a_init,
- lxt971a_reset,
- lxt971a_status,
-};
+/* static PHY setup
+ *
+ * most boards' PHY setup should be detectable properly with the
+ * autodetection algorithm in mii_probe(), but in some cases (e.g. if
+ * you have a switch attached, or want to use the PHY's interrupt
+ * notification capabilities) you can provide a static PHY
+ * configuration here
+ *
+ * IRQs may only be set if a PHY address was configured.
+ * If a PHY address is given, a bus id must also be set.
+ *
+ * ps: make sure the used irqs are configured properly in the board
+ * specific irq-map
+ */
-struct phy_ops ks8995m_ops = {
- ks8995m_init,
- ks8995m_reset,
- ks8995m_status,
-};
+#if defined(CONFIG_MIPS_BOSPORUS)
+/*
+ * Micrel/Kendin 5 port switch attached to MAC0,
+ * MAC0 is associated with PHY address 5 (== WAN port)
+ * MAC1 is not associated with any PHY, since it's connected directly
+ * to the switch.
+ * no interrupts are used
+ */
+# define AU1XXX_PHY_STATIC_CONFIG
-struct phy_ops smsc_83C185_ops = {
- smsc_83C185_init,
- smsc_83C185_reset,
- smsc_83C185_status,
-};
+# define AU1XXX_PHY0_ADDR 5
+# define AU1XXX_PHY0_BUSID 0
+# undef AU1XXX_PHY0_IRQ
-#ifdef CONFIG_MIPS_BOSPORUS
-struct phy_ops stub_ops = {
- stub_init,
- stub_reset,
- stub_status,
-};
+# undef AU1XXX_PHY1_ADDR
+# undef AU1XXX_PHY1_BUSID
+# undef AU1XXX_PHY1_IRQ
#endif
-static struct mii_chip_info {
- const char * name;
- u16 phy_id0;
- u16 phy_id1;
- struct phy_ops *phy_ops;
- int dual_phy;
-} mii_chip_table[] = {
- {"Broadcom BCM5201 10/100 BaseT PHY",0x0040,0x6212, &bcm_5201_ops,0},
- {"Broadcom BCM5221 10/100 BaseT PHY",0x0040,0x61e4, &bcm_5201_ops,0},
- {"Broadcom BCM5222 10/100 BaseT PHY",0x0040,0x6322, &bcm_5201_ops,1},
- {"NS DP83847 PHY", 0x2000, 0x5c30, &bcm_5201_ops ,0},
- {"AMD 79C901 HomePNA PHY",0x0000,0x35c8, &am79c901_ops,0},
- {"AMD 79C874 10/100 BaseT PHY",0x0022,0x561b, &am79c874_ops,0},
- {"LSI 80227 10/100 BaseT PHY",0x0016,0xf840, &lsi_80227_ops,0},
- {"Intel LXT971A Dual Speed PHY",0x0013,0x78e2, &lxt971a_ops,0},
- {"Kendin KS8995M 10/100 BaseT PHY",0x0022,0x1450, &ks8995m_ops,0},
- {"SMSC LAN83C185 10/100 BaseT PHY",0x0007,0xc0a3, &smsc_83C185_ops,0},
-#ifdef CONFIG_MIPS_BOSPORUS
- {"Stub", 0x1234, 0x5678, &stub_ops },
+#if defined(AU1XXX_PHY0_BUSID) && (AU1XXX_PHY0_BUSID > 0)
+# error MAC0-associated PHY attached 2nd MACs MII bus not supported yet
#endif
- {0,},
-};
-static int mdio_read(struct net_device *dev, int phy_id, int reg)
+/*
+ * MII operations
+ */
+static int mdio_read(struct net_device *dev, int phy_addr, int reg)
{
struct au1000_private *aup = (struct au1000_private *) dev->priv;
- volatile u32 *mii_control_reg;
- volatile u32 *mii_data_reg;
+ volatile u32 *const mii_control_reg = &aup->mac->mii_control;
+ volatile u32 *const mii_data_reg = &aup->mac->mii_data;
u32 timedout = 20;
u32 mii_control;
- #ifdef CONFIG_BCM5222_DUAL_PHY
- /* First time we probe, it's for the mac0 phy.
- * Since we haven't determined yet that we have a dual phy,
- * aup->mii->mii_control_reg won't be setup and we'll
- * default to the else statement.
- * By the time we probe for the mac1 phy, the mii_control_reg
- * will be setup to be the address of the mac0 phy control since
- * both phys are controlled through mac0.
- */
- if (aup->mii && aup->mii->mii_control_reg) {
- mii_control_reg = aup->mii->mii_control_reg;
- mii_data_reg = aup->mii->mii_data_reg;
- }
- else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
- /* assume both phys are controlled through mac0 */
- mii_control_reg = au_macs[0]->mii->mii_control_reg;
- mii_data_reg = au_macs[0]->mii->mii_data_reg;
- }
- else
- #endif
- {
- /* default control and data reg addresses */
- mii_control_reg = &aup->mac->mii_control;
- mii_data_reg = &aup->mac->mii_data;
- }
-
while (*mii_control_reg & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
@@ -835,7 +215,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg)
}
mii_control = MAC_SET_MII_SELECT_REG(reg) |
- MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_READ;
+ MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;
*mii_control_reg = mii_control;
@@ -851,32 +231,14 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg)
return (int)*mii_data_reg;
}
-static void mdio_write(struct net_device *dev, int phy_id, int reg, u16 value)
+static void mdio_write(struct net_device *dev, int phy_addr, int reg, u16 value)
{
struct au1000_private *aup = (struct au1000_private *) dev->priv;
- volatile u32 *mii_control_reg;
- volatile u32 *mii_data_reg;
+ volatile u32 *const mii_control_reg = &aup->mac->mii_control;
+ volatile u32 *const mii_data_reg = &aup->mac->mii_data;
u32 timedout = 20;
u32 mii_control;
- #ifdef CONFIG_BCM5222_DUAL_PHY
- if (aup->mii && aup->mii->mii_control_reg) {
- mii_control_reg = aup->mii->mii_control_reg;
- mii_data_reg = aup->mii->mii_data_reg;
- }
- else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
- /* assume both phys are controlled through mac0 */
- mii_control_reg = au_macs[0]->mii->mii_control_reg;
- mii_data_reg = au_macs[0]->mii->mii_data_reg;
- }
- else
- #endif
- {
- /* default control and data reg addresses */
- mii_control_reg = &aup->mac->mii_control;
- mii_data_reg = &aup->mac->mii_data;
- }
-
while (*mii_control_reg & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
@@ -887,165 +249,145 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg, u16 value)
}
mii_control = MAC_SET_MII_SELECT_REG(reg) |
- MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_WRITE;
+ MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;
*mii_data_reg = value;
*mii_control_reg = mii_control;
}
+static int mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ /* WARNING: bus->phy_map[phy_addr].attached_dev == dev does
+ * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus) */
+ struct net_device *const dev = bus->priv;
+
+ enable_mac(dev, 0); /* make sure the MAC associated with this
+ * mii_bus is enabled */
+ return mdio_read(dev, phy_addr, regnum);
+}
-static void dump_mii(struct net_device *dev, int phy_id)
+static int mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 value)
{
- int i, val;
+ struct net_device *const dev = bus->priv;
- for (i = 0; i < 7; i++) {
- if ((val = mdio_read(dev, phy_id, i)) >= 0)
- printk("%s: MII Reg %d=%x\n", dev->name, i, val);
- }
- for (i = 16; i < 25; i++) {
- if ((val = mdio_read(dev, phy_id, i)) >= 0)
- printk("%s: MII Reg %d=%x\n", dev->name, i, val);
- }
+ enable_mac(dev, 0); /* make sure the MAC associated with this
+ * mii_bus is enabled */
+ mdio_write(dev, phy_addr, regnum, value);
+ return 0;
}
-static int mii_probe (struct net_device * dev)
+static int mdiobus_reset(struct mii_bus *bus)
{
- struct au1000_private *aup = (struct au1000_private *) dev->priv;
- int phy_addr;
-#ifdef CONFIG_MIPS_BOSPORUS
- int phy_found=0;
-#endif
+ struct net_device *const dev = bus->priv;
- /* search for total of 32 possible mii phy addresses */
- for (phy_addr = 0; phy_addr < 32; phy_addr++) {
- u16 mii_status;
- u16 phy_id0, phy_id1;
- int i;
+ enable_mac(dev, 0); /* make sure the MAC associated with this
+ * mii_bus is enabled */
+ return 0;
+}
- #ifdef CONFIG_BCM5222_DUAL_PHY
- /* Mask the already found phy, try next one */
- if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
- if (au_macs[0]->phy_addr == phy_addr)
- continue;
- }
- #endif
-
- mii_status = mdio_read(dev, phy_addr, MII_STATUS);
- if (mii_status == 0xffff || mii_status == 0x0000)
- /* the mii is not accessable, try next one */
- continue;
-
- phy_id0 = mdio_read(dev, phy_addr, MII_PHY_ID0);
- phy_id1 = mdio_read(dev, phy_addr, MII_PHY_ID1);
-
- /* search our mii table for the current mii */
- for (i = 0; mii_chip_table[i].phy_id1; i++) {
- if (phy_id0 == mii_chip_table[i].phy_id0 &&
- phy_id1 == mii_chip_table[i].phy_id1) {
- struct mii_phy * mii_phy = aup->mii;
-
- printk(KERN_INFO "%s: %s at phy address %d\n",
- dev->name, mii_chip_table[i].name,
- phy_addr);
-#ifdef CONFIG_MIPS_BOSPORUS
- phy_found = 1;
-#endif
- mii_phy->chip_info = mii_chip_table+i;
- aup->phy_addr = phy_addr;
- aup->want_autoneg = 1;
- aup->phy_ops = mii_chip_table[i].phy_ops;
- aup->phy_ops->phy_init(dev,phy_addr);
-
- // Check for dual-phy and then store required
- // values and set indicators. We need to do
- // this now since mdio_{read,write} need the
- // control and data register addresses.
- #ifdef CONFIG_BCM5222_DUAL_PHY
- if ( mii_chip_table[i].dual_phy) {
-
- /* assume both phys are controlled
- * through MAC0. Board specific? */
-
- /* sanity check */
- if (!au_macs[0] || !au_macs[0]->mii)
- return -1;
- aup->mii->mii_control_reg = (u32 *)
- &au_macs[0]->mac->mii_control;
- aup->mii->mii_data_reg = (u32 *)
- &au_macs[0]->mac->mii_data;
- }
- #endif
- goto found;
- }
+static int mii_probe (struct net_device *dev)
+{
+ struct au1000_private *const aup = (struct au1000_private *) dev->priv;
+ struct phy_device *phydev = NULL;
+
+#if defined(AU1XXX_PHY_STATIC_CONFIG)
+ BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
+
+ if(aup->mac_id == 0) { /* get PHY0 */
+# if defined(AU1XXX_PHY0_ADDR)
+ phydev = au_macs[AU1XXX_PHY0_BUSID]->mii_bus.phy_map[AU1XXX_PHY0_ADDR];
+# else
+ printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
+ dev->name);
+ return 0;
+# endif /* defined(AU1XXX_PHY0_ADDR) */
+ } else if (aup->mac_id == 1) { /* get PHY1 */
+# if defined(AU1XXX_PHY1_ADDR)
+ phydev = au_macs[AU1XXX_PHY1_BUSID]->mii_bus.phy_map[AU1XXX_PHY1_ADDR];
+# else
+ printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
+ dev->name);
+ return 0;
+# endif /* defined(AU1XXX_PHY1_ADDR) */
+ }
+
+#else /* defined(AU1XXX_PHY_STATIC_CONFIG) */
+ int phy_addr;
+
+ /* find the first (lowest address) PHY on the current MAC's MII bus */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
+ if (aup->mii_bus.phy_map[phy_addr]) {
+ phydev = aup->mii_bus.phy_map[phy_addr];
+# if !defined(AU1XXX_PHY_SEARCH_HIGHEST_ADDR)
+ break; /* break out with first one found */
+# endif
}
- }
-found:
-
-#ifdef CONFIG_MIPS_BOSPORUS
- /* This is a workaround for the Micrel/Kendin 5 port switch
- The second MAC doesn't see a PHY connected... so we need to
- trick it into thinking we have one.
-
- If this kernel is run on another Au1500 development board
- the stub will be found as well as the actual PHY. However,
- the last found PHY will be used... usually at Addr 31 (Db1500).
- */
- if ( (!phy_found) )
- {
- u16 phy_id0, phy_id1;
- int i;
- phy_id0 = 0x1234;
- phy_id1 = 0x5678;
-
- /* search our mii table for the current mii */
- for (i = 0; mii_chip_table[i].phy_id1; i++) {
- if (phy_id0 == mii_chip_table[i].phy_id0 &&
- phy_id1 == mii_chip_table[i].phy_id1) {
- struct mii_phy * mii_phy;
-
- printk(KERN_INFO "%s: %s at phy address %d\n",
- dev->name, mii_chip_table[i].name,
- phy_addr);
- mii_phy = kmalloc(sizeof(struct mii_phy),
- GFP_KERNEL);
- if (mii_phy) {
- mii_phy->chip_info = mii_chip_table+i;
- aup->phy_addr = phy_addr;
- mii_phy->next = aup->mii;
- aup->phy_ops =
- mii_chip_table[i].phy_ops;
- aup->mii = mii_phy;
- aup->phy_ops->phy_init(dev,phy_addr);
- } else {
- printk(KERN_ERR "%s: out of memory\n",
- dev->name);
- return -1;
- }
- mii_phy->chip_info = mii_chip_table+i;
- aup->phy_addr = phy_addr;
- aup->phy_ops = mii_chip_table[i].phy_ops;
- aup->phy_ops->phy_init(dev,phy_addr);
- break;
- }
+# if defined(AU1XXX_PHY1_SEARCH_ON_MAC0)
+ /* try harder to find a PHY */
+ if (!phydev && (aup->mac_id == 1)) {
+ /* no PHY found, maybe we have a dual PHY? */
+ printk (KERN_INFO DRV_NAME ": no PHY found on MAC1, "
+ "let's see if it's attached to MAC0...\n");
+
+ BUG_ON(!au_macs[0]);
+
+ /* find the first (lowest address) non-attached PHY on
+ * the MAC0 MII bus */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ struct phy_device *const tmp_phydev =
+ au_macs[0]->mii_bus.phy_map[phy_addr];
+
+ if (!tmp_phydev)
+ continue; /* no PHY here... */
+
+ if (tmp_phydev->attached_dev)
+ continue; /* already claimed by MAC0 */
+
+ phydev = tmp_phydev;
+ break; /* found it */
}
}
- if (aup->mac_id == 0) {
- /* the Bosporus phy responds to addresses 0-5 but
- * 5 is the correct one.
- */
- aup->phy_addr = 5;
- }
-#endif
+# endif /* defined(AU1XXX_PHY1_SEARCH_ON_MAC0) */
- if (aup->mii->chip_info == NULL) {
- printk(KERN_ERR "%s: Au1x No known MII transceivers found!\n",
- dev->name);
+#endif /* defined(AU1XXX_PHY_STATIC_CONFIG) */
+ if (!phydev) {
+ printk (KERN_ERR DRV_NAME ":%s: no PHY found\n", dev->name);
return -1;
}
- printk(KERN_INFO "%s: Using %s as default\n",
- dev->name, aup->mii->chip_info->name);
+ /* now we are supposed to have a proper phydev, to attach to... */
+ BUG_ON(!phydev);
+ BUG_ON(phydev->attached_dev);
+
+ phydev = phy_connect(dev, phydev->dev.bus_id, &au1000_adjust_link, 0);
+
+ if (IS_ERR(phydev)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(phydev);
+ }
+
+ /* mask with MAC supported features */
+ phydev->supported &= (SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full
+ | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full
+ | SUPPORTED_Autoneg
+ /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */
+ | SUPPORTED_MII
+ | SUPPORTED_TP);
+
+ phydev->advertising = phydev->supported;
+
+ aup->old_link = 0;
+ aup->old_speed = 0;
+ aup->old_duplex = -1;
+ aup->phy_dev = phydev;
+
+ printk(KERN_INFO "%s: attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d)\n",
+ dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq);
return 0;
}
@@ -1097,35 +439,38 @@ static void hard_stop(struct net_device *dev)
au_sync_delay(10);
}
-
-static void reset_mac(struct net_device *dev)
+static void enable_mac(struct net_device *dev, int force_reset)
{
- int i;
- u32 flags;
+ unsigned long flags;
struct au1000_private *aup = (struct au1000_private *) dev->priv;
- if (au1000_debug > 4)
- printk(KERN_INFO "%s: reset mac, aup %x\n",
- dev->name, (unsigned)aup);
-
spin_lock_irqsave(&aup->lock, flags);
- if (aup->timer.function == &au1000_timer) {/* check if timer initted */
- del_timer(&aup->timer);
- }
- hard_stop(dev);
- #ifdef CONFIG_BCM5222_DUAL_PHY
- if (aup->mac_id != 0) {
- #endif
- /* If BCM5222, we can't leave MAC0 in reset because then
- * we can't access the dual phy for ETH1 */
+ if(force_reset || (!aup->mac_enabled)) {
*aup->enable = MAC_EN_CLOCK_ENABLE;
au_sync_delay(2);
- *aup->enable = 0;
+ *aup->enable = (MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
+ | MAC_EN_CLOCK_ENABLE);
au_sync_delay(2);
- #ifdef CONFIG_BCM5222_DUAL_PHY
+
+ aup->mac_enabled = 1;
}
- #endif
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+}
+
+static void reset_mac_unlocked(struct net_device *dev)
+{
+ struct au1000_private *const aup = (struct au1000_private *) dev->priv;
+ int i;
+
+ hard_stop(dev);
+
+ *aup->enable = MAC_EN_CLOCK_ENABLE;
+ au_sync_delay(2);
+ *aup->enable = 0;
+ au_sync_delay(2);
+
aup->tx_full = 0;
for (i = 0; i < NUM_RX_DMA; i++) {
/* reset control bits */
@@ -1135,9 +480,26 @@ static void reset_mac(struct net_device *dev)
/* reset control bits */
aup->tx_dma_ring[i]->buff_stat &= ~0xf;
}
- spin_unlock_irqrestore(&aup->lock, flags);
+
+ aup->mac_enabled = 0;
+
}
+static void reset_mac(struct net_device *dev)
+{
+ struct au1000_private *const aup = (struct au1000_private *) dev->priv;
+ unsigned long flags;
+
+ if (au1000_debug > 4)
+ printk(KERN_INFO "%s: reset mac, aup %x\n",
+ dev->name, (unsigned)aup);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ reset_mac_unlocked (dev);
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+}
/*
* Setup the receive and transmit "rings". These pointers are the addresses
@@ -1160,12 +522,27 @@ setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
}
static struct {
- int port;
u32 base_addr;
u32 macen_addr;
int irq;
struct net_device *dev;
-} iflist[2];
+} iflist[2] = {
+#ifdef CONFIG_SOC_AU1000
+ {AU1000_ETH0_BASE, AU1000_MAC0_ENABLE, AU1000_MAC0_DMA_INT},
+ {AU1000_ETH1_BASE, AU1000_MAC1_ENABLE, AU1000_MAC1_DMA_INT}
+#endif
+#ifdef CONFIG_SOC_AU1100
+ {AU1100_ETH0_BASE, AU1100_MAC0_ENABLE, AU1100_MAC0_DMA_INT}
+#endif
+#ifdef CONFIG_SOC_AU1500
+ {AU1500_ETH0_BASE, AU1500_MAC0_ENABLE, AU1500_MAC0_DMA_INT},
+ {AU1500_ETH1_BASE, AU1500_MAC1_ENABLE, AU1500_MAC1_DMA_INT}
+#endif
+#ifdef CONFIG_SOC_AU1550
+ {AU1550_ETH0_BASE, AU1550_MAC0_ENABLE, AU1550_MAC0_DMA_INT},
+ {AU1550_ETH1_BASE, AU1550_MAC1_ENABLE, AU1550_MAC1_DMA_INT}
+#endif
+};
static int num_ifs;
@@ -1176,58 +553,14 @@ static int num_ifs;
*/
static int __init au1000_init_module(void)
{
- struct cpuinfo_mips *c = &current_cpu_data;
int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
struct net_device *dev;
int i, found_one = 0;
- switch (c->cputype) {
-#ifdef CONFIG_SOC_AU1000
- case CPU_AU1000:
- num_ifs = 2 - ni;
- iflist[0].base_addr = AU1000_ETH0_BASE;
- iflist[1].base_addr = AU1000_ETH1_BASE;
- iflist[0].macen_addr = AU1000_MAC0_ENABLE;
- iflist[1].macen_addr = AU1000_MAC1_ENABLE;
- iflist[0].irq = AU1000_MAC0_DMA_INT;
- iflist[1].irq = AU1000_MAC1_DMA_INT;
- break;
-#endif
-#ifdef CONFIG_SOC_AU1100
- case CPU_AU1100:
- num_ifs = 1 - ni;
- iflist[0].base_addr = AU1100_ETH0_BASE;
- iflist[0].macen_addr = AU1100_MAC0_ENABLE;
- iflist[0].irq = AU1100_MAC0_DMA_INT;
- break;
-#endif
-#ifdef CONFIG_SOC_AU1500
- case CPU_AU1500:
- num_ifs = 2 - ni;
- iflist[0].base_addr = AU1500_ETH0_BASE;
- iflist[1].base_addr = AU1500_ETH1_BASE;
- iflist[0].macen_addr = AU1500_MAC0_ENABLE;
- iflist[1].macen_addr = AU1500_MAC1_ENABLE;
- iflist[0].irq = AU1500_MAC0_DMA_INT;
- iflist[1].irq = AU1500_MAC1_DMA_INT;
- break;
-#endif
-#ifdef CONFIG_SOC_AU1550
- case CPU_AU1550:
- num_ifs = 2 - ni;
- iflist[0].base_addr = AU1550_ETH0_BASE;
- iflist[1].base_addr = AU1550_ETH1_BASE;
- iflist[0].macen_addr = AU1550_MAC0_ENABLE;
- iflist[1].macen_addr = AU1550_MAC1_ENABLE;
- iflist[0].irq = AU1550_MAC0_DMA_INT;
- iflist[1].irq = AU1550_MAC1_DMA_INT;
- break;
-#endif
- default:
- num_ifs = 0;
- }
+ num_ifs = NUM_ETH_INTERFACES - ni;
+
for(i = 0; i < num_ifs; i++) {
- dev = au1000_probe(iflist[i].base_addr, iflist[i].irq, i);
+ dev = au1000_probe(i);
iflist[i].dev = dev;
if (dev)
found_one++;
@@ -1237,178 +570,31 @@ static int __init au1000_init_module(void)
return 0;
}
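
With the per-SoC iflist[] initializers above, the old cputype switch collapses into table indexing; NUM_ETH_INTERFACES is assumed here to be a per-SoC constant supplied by the driver headers. The resulting probe loop, condensed from the hunk:

    /* SYS_PINFUNC:NI2 can disable the second port on some parts */
    num_ifs = NUM_ETH_INTERFACES - ni;

    for (i = 0; i < num_ifs; i++) {
        dev = au1000_probe(i);      /* base/macen/irq now come from iflist[i] */
        iflist[i].dev = dev;
        if (dev)
            found_one++;
    }
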
-static int au1000_setup_aneg(struct net_device *dev, u32 advertise)
-{
- struct au1000_private *aup = (struct au1000_private *)dev->priv;
- u16 ctl, adv;
-
- /* Setup standard advertise */
- adv = mdio_read(dev, aup->phy_addr, MII_ADVERTISE);
- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
- if (advertise & ADVERTISED_10baseT_Half)
- adv |= ADVERTISE_10HALF;
- if (advertise & ADVERTISED_10baseT_Full)
- adv |= ADVERTISE_10FULL;
- if (advertise & ADVERTISED_100baseT_Half)
- adv |= ADVERTISE_100HALF;
- if (advertise & ADVERTISED_100baseT_Full)
- adv |= ADVERTISE_100FULL;
- mdio_write(dev, aup->phy_addr, MII_ADVERTISE, adv);
-
- /* Start/Restart aneg */
- ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
- ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
- mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
-
- return 0;
-}
+/*
+ * ethtool operations
+ */
-static int au1000_setup_forced(struct net_device *dev, int speed, int fd)
+static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct au1000_private *aup = (struct au1000_private *)dev->priv;
- u16 ctl;
-
- ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
- ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
-
- /* First reset the PHY */
- mdio_write(dev, aup->phy_addr, MII_BMCR, ctl | BMCR_RESET);
-
- /* Select speed & duplex */
- switch (speed) {
- case SPEED_10:
- break;
- case SPEED_100:
- ctl |= BMCR_SPEED100;
- break;
- case SPEED_1000:
- default:
- return -EINVAL;
- }
- if (fd == DUPLEX_FULL)
- ctl |= BMCR_FULLDPLX;
- mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
- return 0;
-}
+ if (aup->phy_dev)
+ return phy_ethtool_gset(aup->phy_dev, cmd);
-
-static void
-au1000_start_link(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct au1000_private *aup = (struct au1000_private *)dev->priv;
- u32 advertise;
- int autoneg;
- int forced_speed;
- int forced_duplex;
-
- /* Default advertise */
- advertise = GENMII_DEFAULT_ADVERTISE;
- autoneg = aup->want_autoneg;
- forced_speed = SPEED_100;
- forced_duplex = DUPLEX_FULL;
-
- /* Setup link parameters */
- if (cmd) {
- if (cmd->autoneg == AUTONEG_ENABLE) {
- advertise = cmd->advertising;
- autoneg = 1;
- } else {
- autoneg = 0;
-
- forced_speed = cmd->speed;
- forced_duplex = cmd->duplex;
- }
- }
-
- /* Configure PHY & start aneg */
- aup->want_autoneg = autoneg;
- if (autoneg)
- au1000_setup_aneg(dev, advertise);
- else
- au1000_setup_forced(dev, forced_speed, forced_duplex);
- mod_timer(&aup->timer, jiffies + HZ);
+ return -EINVAL;
}
-static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct au1000_private *aup = (struct au1000_private *)dev->priv;
- u16 link, speed;
-
- cmd->supported = GENMII_DEFAULT_FEATURES;
- cmd->advertising = GENMII_DEFAULT_ADVERTISE;
- cmd->port = PORT_MII;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->phy_address = aup->phy_addr;
- spin_lock_irq(&aup->lock);
- cmd->autoneg = aup->want_autoneg;
- aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
- if ((speed == IF_PORT_100BASETX) || (speed == IF_PORT_100BASEFX))
- cmd->speed = SPEED_100;
- else if (speed == IF_PORT_10BASET)
- cmd->speed = SPEED_10;
- if (link && (dev->if_port == IF_PORT_100BASEFX))
- cmd->duplex = DUPLEX_FULL;
- else
- cmd->duplex = DUPLEX_HALF;
- spin_unlock_irq(&aup->lock);
- return 0;
-}
-static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct au1000_private *aup = (struct au1000_private *)dev->priv;
- unsigned long features = GENMII_DEFAULT_FEATURES;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
- return -EINVAL;
- if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
- return -EINVAL;
- if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
- return -EINVAL;
- if (cmd->autoneg == AUTONEG_DISABLE)
- switch (cmd->speed) {
- case SPEED_10:
- if (cmd->duplex == DUPLEX_HALF &&
- (features & SUPPORTED_10baseT_Half) == 0)
- return -EINVAL;
- if (cmd->duplex == DUPLEX_FULL &&
- (features & SUPPORTED_10baseT_Full) == 0)
- return -EINVAL;
- break;
- case SPEED_100:
- if (cmd->duplex == DUPLEX_HALF &&
- (features & SUPPORTED_100baseT_Half) == 0)
- return -EINVAL;
- if (cmd->duplex == DUPLEX_FULL &&
- (features & SUPPORTED_100baseT_Full) == 0)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
- else if ((features & SUPPORTED_Autoneg) == 0)
- return -EINVAL;
-
- spin_lock_irq(&aup->lock);
- au1000_start_link(dev, cmd);
- spin_unlock_irq(&aup->lock);
- return 0;
-}
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
-static int au1000_nway_reset(struct net_device *dev)
-{
- struct au1000_private *aup = (struct au1000_private *)dev->priv;
+ if (aup->phy_dev)
+ return phy_ethtool_sset(aup->phy_dev, cmd);
- if (!aup->want_autoneg)
- return -EINVAL;
- spin_lock_irq(&aup->lock);
- au1000_start_link(dev, NULL);
- spin_unlock_irq(&aup->lock);
- return 0;
+ return -EINVAL;
}
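
Both ethtool entry points now simply defer to phylib, which already tracks the negotiated link parameters; the driver only guards the PHY-less case. The same two functions, with the branches flattened for readability:

    static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
    {
        struct au1000_private *aup = (struct au1000_private *)dev->priv;

        if (!aup->phy_dev)
            return -EINVAL;                 /* running without a PHY */
        return phy_ethtool_gset(aup->phy_dev, cmd);
    }

    static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
    {
        struct au1000_private *aup = (struct au1000_private *)dev->priv;

        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        if (!aup->phy_dev)
            return -EINVAL;
        return phy_ethtool_sset(aup->phy_dev, cmd);
    }
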
static void
@@ -1423,21 +609,14 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
info->regdump_len = 0;
}
-static u32 au1000_get_link(struct net_device *dev)
-{
- return netif_carrier_ok(dev);
-}
-
static struct ethtool_ops au1000_ethtool_ops = {
.get_settings = au1000_get_settings,
.set_settings = au1000_set_settings,
.get_drvinfo = au1000_get_drvinfo,
- .nway_reset = au1000_nway_reset,
- .get_link = au1000_get_link
+ .get_link = ethtool_op_get_link,
};
-static struct net_device *
-au1000_probe(u32 ioaddr, int irq, int port_num)
+static struct net_device * au1000_probe(int port_num)
{
static unsigned version_printed = 0;
struct au1000_private *aup = NULL;
@@ -1445,106 +624,115 @@ au1000_probe(u32 ioaddr, int irq, int port_num)
db_dest_t *pDB, *pDBfree;
char *pmac, *argptr;
char ethaddr[6];
- int i, err;
+ int irq, i, err;
+ u32 base, macen;
+
+ if (port_num >= NUM_ETH_INTERFACES)
+ return NULL;
+
+ base = CPHYSADDR(iflist[port_num].base_addr );
+ macen = CPHYSADDR(iflist[port_num].macen_addr);
+ irq = iflist[port_num].irq;
- if (!request_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE, "Au1x00 ENET"))
+ if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") ||
+ !request_mem_region(macen, 4, "Au1x00 ENET"))
return NULL;
- if (version_printed++ == 0)
+ if (version_printed++ == 0)
printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
dev = alloc_etherdev(sizeof(struct au1000_private));
if (!dev) {
- printk (KERN_ERR "au1000 eth: alloc_etherdev failed\n");
+ printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
return NULL;
}
- if ((err = register_netdev(dev))) {
- printk(KERN_ERR "Au1x_eth Cannot register net device err %d\n",
- err);
+ if ((err = register_netdev(dev)) != 0) {
+ printk(KERN_ERR "%s: Cannot register net device, error %d\n",
+ DRV_NAME, err);
free_netdev(dev);
return NULL;
}
- printk("%s: Au1x Ethernet found at 0x%x, irq %d\n",
- dev->name, ioaddr, irq);
+ printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
+ dev->name, base, irq);
aup = dev->priv;
/* Allocate the data buffers */
/* Snooping works fine with eth on all au1xxx */
- aup->vaddr = (u32)dma_alloc_noncoherent(NULL,
- MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
- &aup->dma_addr,
- 0);
+ aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
+ (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ &aup->dma_addr, 0);
if (!aup->vaddr) {
free_netdev(dev);
- release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
+ release_mem_region( base, MAC_IOSIZE);
+ release_mem_region(macen, 4);
return NULL;
}
/* aup->mac is the base address of the MAC's registers */
- aup->mac = (volatile mac_reg_t *)((unsigned long)ioaddr);
+ aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr;
+
/* Setup some variables for quick register address access */
- if (ioaddr == iflist[0].base_addr)
- {
- /* check env variables first */
- if (!get_ethernet_addr(ethaddr)) {
+ aup->enable = (volatile u32 *)iflist[port_num].macen_addr;
+ aup->mac_id = port_num;
+ au_macs[port_num] = aup;
+
+ if (port_num == 0) {
+ /* Check the environment variables first */
+ if (get_ethernet_addr(ethaddr) == 0)
memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
- } else {
+ else {
/* Check command line */
argptr = prom_getcmdline();
- if ((pmac = strstr(argptr, "ethaddr=")) == NULL) {
- printk(KERN_INFO "%s: No mac address found\n",
- dev->name);
- /* use the hard coded mac addresses */
- } else {
+ if ((pmac = strstr(argptr, "ethaddr=")) == NULL)
+ printk(KERN_INFO "%s: No MAC address found\n",
+ dev->name);
+ /* Use the hard coded MAC addresses */
+ else {
str2eaddr(ethaddr, pmac + strlen("ethaddr="));
memcpy(au1000_mac_addr, ethaddr,
- sizeof(au1000_mac_addr));
+ sizeof(au1000_mac_addr));
}
}
- aup->enable = (volatile u32 *)
- ((unsigned long)iflist[0].macen_addr);
- memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
+
setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
- aup->mac_id = 0;
- au_macs[0] = aup;
- }
- else
- if (ioaddr == iflist[1].base_addr)
- {
- aup->enable = (volatile u32 *)
- ((unsigned long)iflist[1].macen_addr);
- memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
- dev->dev_addr[4] += 0x10;
+ } else if (port_num == 1)
setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
- aup->mac_id = 1;
- au_macs[1] = aup;
- }
- else
- {
- printk(KERN_ERR "%s: bad ioaddr\n", dev->name);
- }
-
- /* bring the device out of reset, otherwise probing the mii
- * will hang */
- *aup->enable = MAC_EN_CLOCK_ENABLE;
- au_sync_delay(2);
- *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
- MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
- au_sync_delay(2);
- aup->mii = kmalloc(sizeof(struct mii_phy), GFP_KERNEL);
- if (!aup->mii) {
- printk(KERN_ERR "%s: out of memory\n", dev->name);
- goto err_out;
- }
- aup->mii->next = NULL;
- aup->mii->chip_info = NULL;
- aup->mii->status = 0;
- aup->mii->mii_control_reg = 0;
- aup->mii->mii_data_reg = 0;
+ /*
+ * Assign to the Ethernet ports two consecutive MAC addresses
+ * to match those that are printed on their stickers
+ */
+ memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
+ dev->dev_addr[5] += port_num;
+
+ *aup->enable = 0;
+ aup->mac_enabled = 0;
+
+ aup->mii_bus.priv = dev;
+ aup->mii_bus.read = mdiobus_read;
+ aup->mii_bus.write = mdiobus_write;
+ aup->mii_bus.reset = mdiobus_reset;
+ aup->mii_bus.name = "au1000_eth_mii";
+ aup->mii_bus.id = aup->mac_id;
+ aup->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ for(i = 0; i < PHY_MAX_ADDR; ++i)
+ aup->mii_bus.irq[i] = PHY_POLL;
+
+ /* if known, set corresponding PHY IRQs */
+#if defined(AU1XXX_PHY_STATIC_CONFIG)
+# if defined(AU1XXX_PHY0_IRQ)
+ if (AU1XXX_PHY0_BUSID == aup->mii_bus.id)
+ aup->mii_bus.irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ;
+# endif
+# if defined(AU1XXX_PHY1_IRQ)
+ if (AU1XXX_PHY1_BUSID == aup->mii_bus.id)
+ aup->mii_bus.irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ;
+# endif
+#endif
+ mdiobus_register(&aup->mii_bus);
if (mii_probe(dev) != 0) {
goto err_out;
@@ -1581,7 +769,7 @@ au1000_probe(u32 ioaddr, int irq, int port_num)
}
spin_lock_init(&aup->lock);
- dev->base_addr = ioaddr;
+ dev->base_addr = base;
dev->irq = irq;
dev->open = au1000_open;
dev->hard_start_xmit = au1000_tx;
@@ -1590,7 +778,6 @@ au1000_probe(u32 ioaddr, int irq, int port_num)
dev->set_multicast_list = &set_rx_mode;
dev->do_ioctl = &au1000_ioctl;
SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
- dev->set_config = &au1000_set_config;
dev->tx_timeout = au1000_tx_timeout;
dev->watchdog_timeo = ETH_TX_TIMEOUT;
@@ -1606,7 +793,7 @@ err_out:
/* here we should have a valid dev plus aup-> register addresses
* so we can reset the mac properly.*/
reset_mac(dev);
- kfree(aup->mii);
+
for (i = 0; i < NUM_RX_DMA; i++) {
if (aup->rx_db_inuse[i])
ReleaseDB(aup, aup->rx_db_inuse[i]);
@@ -1615,13 +802,12 @@ err_out:
if (aup->tx_db_inuse[i])
ReleaseDB(aup, aup->tx_db_inuse[i]);
}
- dma_free_noncoherent(NULL,
- MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
- (void *)aup->vaddr,
- aup->dma_addr);
+ dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
unregister_netdev(dev);
free_netdev(dev);
- release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
+ release_mem_region( base, MAC_IOSIZE);
+ release_mem_region(macen, 4);
return NULL;
}
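
The probe path above also registers one struct mii_bus per MAC so phylib can scan for PHYs; unless a board header supplies a static PHY IRQ, every address defaults to PHY_POLL. Condensed from the hunk (mdiobus_read/write/reset are the driver's own MDIO accessors defined elsewhere in this patch):

    aup->mii_bus.priv  = dev;
    aup->mii_bus.read  = mdiobus_read;      /* MDIO accessors on this MAC */
    aup->mii_bus.write = mdiobus_write;
    aup->mii_bus.reset = mdiobus_reset;
    aup->mii_bus.name  = "au1000_eth_mii";
    aup->mii_bus.id    = aup->mac_id;

    aup->mii_bus.irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
    for (i = 0; i < PHY_MAX_ADDR; ++i)
        aup->mii_bus.irq[i] = PHY_POLL;     /* no PHY interrupt line wired up */

    mdiobus_register(&aup->mii_bus);        /* phylib probes addresses 0..31 */
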
@@ -1640,19 +826,14 @@ static int au1000_init(struct net_device *dev)
u32 flags;
int i;
u32 control;
- u16 link, speed;
if (au1000_debug > 4)
printk("%s: au1000_init\n", dev->name);
- spin_lock_irqsave(&aup->lock, flags);
-
/* bring the device out of reset */
- *aup->enable = MAC_EN_CLOCK_ENABLE;
- au_sync_delay(2);
- *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
- MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
- au_sync_delay(20);
+ enable_mac(dev, 1);
+
+ spin_lock_irqsave(&aup->lock, flags);
aup->mac->control = 0;
aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
@@ -1668,12 +849,16 @@ static int au1000_init(struct net_device *dev)
}
au_sync();
- aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
- control = MAC_DISABLE_RX_OWN | MAC_RX_ENABLE | MAC_TX_ENABLE;
+ control = MAC_RX_ENABLE | MAC_TX_ENABLE;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
control |= MAC_BIG_ENDIAN;
#endif
- if (link && (dev->if_port == IF_PORT_100BASEFX)) {
+ if (aup->phy_dev) {
+ if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex))
+ control |= MAC_FULL_DUPLEX;
+ else
+ control |= MAC_DISABLE_RX_OWN;
+ } else { /* PHY-less op, assume full-duplex */
control |= MAC_FULL_DUPLEX;
}
@@ -1685,57 +870,84 @@ static int au1000_init(struct net_device *dev)
return 0;
}
-static void au1000_timer(unsigned long data)
+static void
+au1000_adjust_link(struct net_device *dev)
{
- struct net_device *dev = (struct net_device *)data;
struct au1000_private *aup = (struct au1000_private *) dev->priv;
- unsigned char if_port;
- u16 link, speed;
+ struct phy_device *phydev = aup->phy_dev;
+ unsigned long flags;
- if (!dev) {
- /* fatal error, don't restart the timer */
- printk(KERN_ERR "au1000_timer error: NULL dev\n");
- return;
- }
+ int status_change = 0;
- if_port = dev->if_port;
- if (aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed) == 0) {
- if (link) {
- if (!netif_carrier_ok(dev)) {
- netif_carrier_on(dev);
- printk(KERN_INFO "%s: link up\n", dev->name);
- }
- }
- else {
- if (netif_carrier_ok(dev)) {
- netif_carrier_off(dev);
- dev->if_port = 0;
- printk(KERN_INFO "%s: link down\n", dev->name);
- }
+ BUG_ON(!aup->phy_dev);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ if (phydev->link && (aup->old_speed != phydev->speed)) {
+ // speed changed
+
+ switch(phydev->speed) {
+ case SPEED_10:
+ case SPEED_100:
+ break;
+ default:
+ printk(KERN_WARNING
+ "%s: Speed (%d) is not 10/100 ???\n",
+ dev->name, phydev->speed);
+ break;
}
+
+ aup->old_speed = phydev->speed;
+
+ status_change = 1;
}
- if (link && (dev->if_port != if_port) &&
- (dev->if_port != IF_PORT_UNKNOWN)) {
+ if (phydev->link && (aup->old_duplex != phydev->duplex)) {
+ // duplex mode changed
+
+ /* switching duplex mode requires disabling RX and TX */
hard_stop(dev);
- if (dev->if_port == IF_PORT_100BASEFX) {
- printk(KERN_INFO "%s: going to full duplex\n",
- dev->name);
- aup->mac->control |= MAC_FULL_DUPLEX;
- au_sync_delay(1);
- }
- else {
- aup->mac->control &= ~MAC_FULL_DUPLEX;
- au_sync_delay(1);
- }
+
+ if (DUPLEX_FULL == phydev->duplex)
+ aup->mac->control = ((aup->mac->control
+ | MAC_FULL_DUPLEX)
+ & ~MAC_DISABLE_RX_OWN);
+ else
+ aup->mac->control = ((aup->mac->control
+ & ~MAC_FULL_DUPLEX)
+ | MAC_DISABLE_RX_OWN);
+ au_sync_delay(1);
+
enable_rx_tx(dev);
+ aup->old_duplex = phydev->duplex;
+
+ status_change = 1;
}
- aup->timer.expires = RUN_AT((1*HZ));
- aup->timer.data = (unsigned long)dev;
- aup->timer.function = &au1000_timer; /* timer handler */
- add_timer(&aup->timer);
+ if(phydev->link != aup->old_link) {
+ // link state changed
+ if (phydev->link) // link went up
+ netif_schedule(dev);
+ else { // link went down
+ aup->old_speed = 0;
+ aup->old_duplex = -1;
+ }
+
+ aup->old_link = phydev->link;
+ status_change = 1;
+ }
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+
+ if (status_change) {
+ if (phydev->link)
+ printk(KERN_INFO "%s: link up (%d/%s)\n",
+ dev->name, phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+ else
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ }
}
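
au1000_adjust_link() is the callback phylib invokes, once phy_start() has been called, whenever it sees a change on the wire; the driver reprograms the MAC only when the cached old_speed/old_duplex/old_link values differ from what phylib reports. Reduced to a skeleton (my_adjust_link and the elided actions are illustrative):

    static void my_adjust_link(struct net_device *dev)
    {
        struct au1000_private *aup = (struct au1000_private *)dev->priv;
        struct phy_device *phydev = aup->phy_dev;

        if (phydev->link && phydev->duplex != aup->old_duplex) {
            /* stop rx/tx, flip MAC_FULL_DUPLEX, restart rx/tx */
            aup->old_duplex = phydev->duplex;
        }
        if (phydev->link != aup->old_link) {
            /* carrier came up or went down: log it, kick the tx queue */
            aup->old_link = phydev->link;
        }
    }
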
static int au1000_open(struct net_device *dev)
@@ -1746,25 +958,26 @@ static int au1000_open(struct net_device *dev)
if (au1000_debug > 4)
printk("%s: open: dev=%p\n", dev->name, dev);
+ if ((retval = request_irq(dev->irq, &au1000_interrupt, 0,
+ dev->name, dev))) {
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ dev->name, dev->irq);
+ return retval;
+ }
+
if ((retval = au1000_init(dev))) {
printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
free_irq(dev->irq, dev);
return retval;
}
- netif_start_queue(dev);
- if ((retval = request_irq(dev->irq, &au1000_interrupt, 0,
- dev->name, dev))) {
- printk(KERN_ERR "%s: unable to get IRQ %d\n",
- dev->name, dev->irq);
- return retval;
+ if (aup->phy_dev) {
+ /* cause the PHY state machine to schedule a link state check */
+ aup->phy_dev->state = PHY_CHANGELINK;
+ phy_start(aup->phy_dev);
}
- init_timer(&aup->timer); /* used in ioctl() */
- aup->timer.expires = RUN_AT((3*HZ));
- aup->timer.data = (unsigned long)dev;
- aup->timer.function = &au1000_timer; /* timer handler */
- add_timer(&aup->timer);
+ netif_start_queue(dev);
if (au1000_debug > 4)
printk("%s: open: Initialization done.\n", dev->name);
@@ -1774,16 +987,19 @@ static int au1000_open(struct net_device *dev)
static int au1000_close(struct net_device *dev)
{
- u32 flags;
- struct au1000_private *aup = (struct au1000_private *) dev->priv;
+ unsigned long flags;
+ struct au1000_private *const aup = (struct au1000_private *) dev->priv;
if (au1000_debug > 4)
printk("%s: close: dev=%p\n", dev->name, dev);
- reset_mac(dev);
+ if (aup->phy_dev)
+ phy_stop(aup->phy_dev);
spin_lock_irqsave(&aup->lock, flags);
-
+
+ reset_mac_unlocked (dev);
+
/* stop the device */
netif_stop_queue(dev);
@@ -1805,21 +1021,18 @@ static void __exit au1000_cleanup_module(void)
if (dev) {
aup = (struct au1000_private *) dev->priv;
unregister_netdev(dev);
- kfree(aup->mii);
- for (j = 0; j < NUM_RX_DMA; j++) {
+ for (j = 0; j < NUM_RX_DMA; j++)
if (aup->rx_db_inuse[j])
ReleaseDB(aup, aup->rx_db_inuse[j]);
- }
- for (j = 0; j < NUM_TX_DMA; j++) {
+ for (j = 0; j < NUM_TX_DMA; j++)
if (aup->tx_db_inuse[j])
ReleaseDB(aup, aup->tx_db_inuse[j]);
- }
- dma_free_noncoherent(NULL,
- MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
- (void *)aup->vaddr,
- aup->dma_addr);
+ dma_free_noncoherent(NULL, MAX_BUF_SIZE *
+ (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
+ release_mem_region(dev->base_addr, MAC_IOSIZE);
+ release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4);
free_netdev(dev);
- release_mem_region(CPHYSADDR(iflist[i].base_addr), MAC_IOSIZE);
}
}
}
@@ -1830,7 +1043,7 @@ static void update_tx_stats(struct net_device *dev, u32 status)
struct net_device_stats *ps = &aup->stats;
if (status & TX_FRAME_ABORTED) {
- if (dev->if_port == IF_PORT_100BASEFX) {
+ if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
/* any other tx errors are only valid
* in half duplex mode */
@@ -2104,126 +1317,15 @@ static void set_rx_mode(struct net_device *dev)
}
}
-
static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct au1000_private *aup = (struct au1000_private *)dev->priv;
- u16 *data = (u16 *)&rq->ifr_ifru;
-
- switch(cmd) {
- case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
- case SIOCGMIIPHY:
- if (!netif_running(dev)) return -EINVAL;
- data[0] = aup->phy_addr;
- case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
- case SIOCGMIIREG:
- data[3] = mdio_read(dev, data[0], data[1]);
- return 0;
- case SIOCDEVPRIVATE+2: /* Write the specified MII register */
- case SIOCSMIIREG:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- mdio_write(dev, data[0], data[1],data[2]);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-
-}
-
-
-static int au1000_set_config(struct net_device *dev, struct ifmap *map)
-{
- struct au1000_private *aup = (struct au1000_private *) dev->priv;
- u16 control;
- if (au1000_debug > 4) {
- printk("%s: set_config called: dev->if_port %d map->port %x\n",
- dev->name, dev->if_port, map->port);
- }
+ if (!netif_running(dev)) return -EINVAL;
- switch(map->port){
- case IF_PORT_UNKNOWN: /* use auto here */
- printk(KERN_INFO "%s: config phy for aneg\n",
- dev->name);
- dev->if_port = map->port;
- /* Link Down: the timer will bring it up */
- netif_carrier_off(dev);
-
- /* read current control */
- control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
- control &= ~(MII_CNTL_FDX | MII_CNTL_F100);
-
- /* enable auto negotiation and reset the negotiation */
- mdio_write(dev, aup->phy_addr, MII_CONTROL,
- control | MII_CNTL_AUTO |
- MII_CNTL_RST_AUTO);
+ if (!aup->phy_dev) return -EINVAL; // PHY not controllable
- break;
-
- case IF_PORT_10BASET: /* 10BaseT */
- printk(KERN_INFO "%s: config phy for 10BaseT\n",
- dev->name);
- dev->if_port = map->port;
-
- /* Link Down: the timer will bring it up */
- netif_carrier_off(dev);
-
- /* set Speed to 10Mbps, Half Duplex */
- control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
- control &= ~(MII_CNTL_F100 | MII_CNTL_AUTO |
- MII_CNTL_FDX);
-
- /* disable auto negotiation and force 10M/HD mode*/
- mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
- break;
-
- case IF_PORT_100BASET: /* 100BaseT */
- case IF_PORT_100BASETX: /* 100BaseTx */
- printk(KERN_INFO "%s: config phy for 100BaseTX\n",
- dev->name);
- dev->if_port = map->port;
-
- /* Link Down: the timer will bring it up */
- netif_carrier_off(dev);
-
- /* set Speed to 100Mbps, Half Duplex */
- /* disable auto negotiation and enable 100MBit Mode */
- control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
- control &= ~(MII_CNTL_AUTO | MII_CNTL_FDX);
- control |= MII_CNTL_F100;
- mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
- break;
-
- case IF_PORT_100BASEFX: /* 100BaseFx */
- printk(KERN_INFO "%s: config phy for 100BaseFX\n",
- dev->name);
- dev->if_port = map->port;
-
- /* Link Down: the timer will bring it up */
- netif_carrier_off(dev);
-
- /* set Speed to 100Mbps, Full Duplex */
- /* disable auto negotiation and enable 100MBit Mode */
- control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
- control &= ~MII_CNTL_AUTO;
- control |= MII_CNTL_F100 | MII_CNTL_FDX;
- mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
- break;
- case IF_PORT_10BASE2: /* 10Base2 */
- case IF_PORT_AUI: /* AUI */
- /* These Modes are not supported (are they?)*/
- printk(KERN_ERR "%s: 10Base2/AUI not supported",
- dev->name);
- return -EOPNOTSUPP;
- break;
-
- default:
- printk(KERN_ERR "%s: Invalid media selected",
- dev->name);
- return -EINVAL;
- }
- return 0;
+ return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd);
}
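
The MII ioctls likewise move into phylib: phy_mii_ioctl() performs the MDIO access (including the permission check for writes), so the driver keeps only the two sanity checks above. A user-space sketch of how these ioctls are exercised, mii-tool style (needs <sys/ioctl.h>, <sys/socket.h>, <net/if.h>, <linux/mii.h>, <linux/sockios.h>; error handling omitted):

    int s = socket(AF_INET, SOCK_DGRAM, 0);
    struct ifreq ifr;
    struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);

    ioctl(s, SIOCGMIIPHY, &ifr);    /* kernel fills mii->phy_id */
    mii->reg_num = MII_BMSR;
    ioctl(s, SIOCGMIIREG, &ifr);    /* basic status returned in mii->val_out */
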
static struct net_device_stats *au1000_get_stats(struct net_device *dev)
diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h
index 7f9326e39cc0..41c2f848d2c4 100644
--- a/drivers/net/au1000_eth.h
+++ b/drivers/net/au1000_eth.h
@@ -40,120 +40,6 @@
#define MULTICAST_FILTER_LIMIT 64
-/* FIXME
- * The PHY defines should be in a separate file.
- */
-
-/* MII register offsets */
-#define MII_CONTROL 0x0000
-#define MII_STATUS 0x0001
-#define MII_PHY_ID0 0x0002
-#define MII_PHY_ID1 0x0003
-#define MII_ANADV 0x0004
-#define MII_ANLPAR 0x0005
-#define MII_AEXP 0x0006
-#define MII_ANEXT 0x0007
-#define MII_LSI_PHY_CONFIG 0x0011
-/* Status register */
-#define MII_LSI_PHY_STAT 0x0012
-#define MII_AMD_PHY_STAT MII_LSI_PHY_STAT
-#define MII_INTEL_PHY_STAT 0x0011
-
-#define MII_AUX_CNTRL 0x0018
-/* mii registers specific to AMD 79C901 */
-#define MII_STATUS_SUMMARY = 0x0018
-
-/* MII Control register bit definitions. */
-#define MII_CNTL_FDX 0x0100
-#define MII_CNTL_RST_AUTO 0x0200
-#define MII_CNTL_ISOLATE 0x0400
-#define MII_CNTL_PWRDWN 0x0800
-#define MII_CNTL_AUTO 0x1000
-#define MII_CNTL_F100 0x2000
-#define MII_CNTL_LPBK 0x4000
-#define MII_CNTL_RESET 0x8000
-
-/* MII Status register bit */
-#define MII_STAT_EXT 0x0001
-#define MII_STAT_JAB 0x0002
-#define MII_STAT_LINK 0x0004
-#define MII_STAT_CAN_AUTO 0x0008
-#define MII_STAT_FAULT 0x0010
-#define MII_STAT_AUTO_DONE 0x0020
-#define MII_STAT_CAN_T 0x0800
-#define MII_STAT_CAN_T_FDX 0x1000
-#define MII_STAT_CAN_TX 0x2000
-#define MII_STAT_CAN_TX_FDX 0x4000
-#define MII_STAT_CAN_T4 0x8000
-
-
-#define MII_ID1_OUI_LO 0xFC00 /* low bits of OUI mask */
-#define MII_ID1_MODEL 0x03F0 /* model number */
-#define MII_ID1_REV 0x000F /* model number */
-
-/* MII NWAY Register Bits ...
- valid for the ANAR (Auto-Negotiation Advertisement) and
- ANLPAR (Auto-Negotiation Link Partner) registers */
-#define MII_NWAY_NODE_SEL 0x001f
-#define MII_NWAY_CSMA_CD 0x0001
-#define MII_NWAY_T 0x0020
-#define MII_NWAY_T_FDX 0x0040
-#define MII_NWAY_TX 0x0080
-#define MII_NWAY_TX_FDX 0x0100
-#define MII_NWAY_T4 0x0200
-#define MII_NWAY_PAUSE 0x0400
-#define MII_NWAY_RF 0x2000 /* Remote Fault */
-#define MII_NWAY_ACK 0x4000 /* Remote Acknowledge */
-#define MII_NWAY_NP 0x8000 /* Next Page (Enable) */
-
-/* mii stsout register bits */
-#define MII_STSOUT_LINK_FAIL 0x4000
-#define MII_STSOUT_SPD 0x0080
-#define MII_STSOUT_DPLX 0x0040
-
-/* mii stsics register bits */
-#define MII_STSICS_SPD 0x8000
-#define MII_STSICS_DPLX 0x4000
-#define MII_STSICS_LINKSTS 0x0001
-
-/* mii stssum register bits */
-#define MII_STSSUM_LINK 0x0008
-#define MII_STSSUM_DPLX 0x0004
-#define MII_STSSUM_AUTO 0x0002
-#define MII_STSSUM_SPD 0x0001
-
-/* lsi phy status register */
-#define MII_LSI_PHY_STAT_FDX 0x0040
-#define MII_LSI_PHY_STAT_SPD 0x0080
-
-/* amd phy status register */
-#define MII_AMD_PHY_STAT_FDX 0x0800
-#define MII_AMD_PHY_STAT_SPD 0x0400
-
-/* intel phy status register */
-#define MII_INTEL_PHY_STAT_FDX 0x0200
-#define MII_INTEL_PHY_STAT_SPD 0x4000
-
-/* Auxilliary Control/Status Register */
-#define MII_AUX_FDX 0x0001
-#define MII_AUX_100 0x0002
-#define MII_AUX_F100 0x0004
-#define MII_AUX_ANEG 0x0008
-
-typedef struct mii_phy {
- struct mii_phy * next;
- struct mii_chip_info * chip_info;
- u16 status;
- u32 *mii_control_reg;
- u32 *mii_data_reg;
-} mii_phy_t;
-
-struct phy_ops {
- int (*phy_init) (struct net_device *, int);
- int (*phy_reset) (struct net_device *, int);
- int (*phy_status) (struct net_device *, int, u16 *, u16 *);
-};
-
/*
* Data Buffer Descriptor. Data buffers must be aligned on 32 byte
* boundary for both, receive and transmit.
@@ -200,7 +86,6 @@ typedef struct mac_reg {
struct au1000_private {
-
db_dest_t *pDBfree;
db_dest_t db[NUM_RX_BUFFS+NUM_TX_BUFFS];
volatile rx_dma_t *rx_dma_ring[NUM_RX_DMA];
@@ -213,8 +98,15 @@ struct au1000_private {
u32 tx_full;
int mac_id;
- mii_phy_t *mii;
- struct phy_ops *phy_ops;
+
+ int mac_enabled; /* whether MAC is currently enabled and running (req. for mdio) */
+
+ int old_link; /* used by au1000_adjust_link */
+ int old_speed;
+ int old_duplex;
+
+ struct phy_device *phy_dev;
+ struct mii_bus mii_bus;
/* These variables are just for quick access to certain regs addresses. */
volatile mac_reg_t *mac; /* mac registers */
@@ -223,14 +115,6 @@ struct au1000_private {
u32 vaddr; /* virtual address of rx/tx buffers */
dma_addr_t dma_addr; /* dma address of rx/tx buffers */
- u8 *hash_table;
- u32 hash_mode;
- u32 intr_work_done; /* number of Rx and Tx pkts processed in the isr */
- int phy_addr; /* phy address */
- u32 options; /* User-settable misc. driver options. */
- u32 drv_flags;
- int want_autoneg;
struct net_device_stats stats;
- struct timer_list timer;
spinlock_t lock; /* Serialise access to device */
};
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index ac48f7543500..39f36aa05aa8 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4877,7 +4877,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static int cas_version_printed = 0;
- unsigned long casreg_base, casreg_len;
+ unsigned long casreg_len;
struct net_device *dev;
struct cas *cp;
int i, err, pci_using_dac;
@@ -4972,7 +4972,6 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
pci_using_dac = 0;
}
- casreg_base = pci_resource_start(pdev, 0);
casreg_len = pci_resource_len(pdev, 0);
cp = netdev_priv(dev);
@@ -5024,7 +5023,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
cp->timer_ticks = 0;
/* give us access to cassini registers */
- cp->regs = ioremap(casreg_base, casreg_len);
+ cp->regs = pci_iomap(pdev, 0, casreg_len);
if (cp->regs == 0UL) {
printk(KERN_ERR PFX "Cannot map device registers, "
"aborting.\n");
@@ -5123,7 +5122,7 @@ err_out_iounmap:
cas_shutdown(cp);
mutex_unlock(&cp->pm_mutex);
- iounmap(cp->regs);
+ pci_iounmap(pdev, cp->regs);
err_out_free_res:
@@ -5171,7 +5170,7 @@ static void __devexit cas_remove_one(struct pci_dev *pdev)
#endif
pci_free_consistent(pdev, sizeof(struct cas_init_block),
cp->init_block, cp->block_dvma);
- iounmap(cp->regs);
+ pci_iounmap(pdev, cp->regs);
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
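
The cassini change swaps a raw ioremap()/iounmap() of BAR 0 for pci_iomap()/pci_iounmap(), which look at the BAR flags to pick I/O-port or memory mapping and hand back an __iomem cookie. The pattern in isolation:

    void __iomem *regs;
    unsigned long len = pci_resource_len(pdev, 0);

    regs = pci_iomap(pdev, 0, len);         /* map all of BAR 0 */
    if (!regs)
        return -ENOMEM;

    /* ... readl(regs + offset) / writel(val, regs + offset) ... */

    pci_iounmap(pdev, regs);
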
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 31ac001f5517..f37170cc1a37 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2780,6 +2780,80 @@ static void e100_shutdown(struct pci_dev *pdev)
DPRINTK(PROBE,ERR, "Error enabling wake\n");
}
+/* ------------------ PCI Error Recovery infrastructure -------------- */
+/**
+ * e100_io_error_detected - called when PCI error is detected.
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ */
+static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ /* Similar to calling e100_down(), but avoids adapter I/O. */
+ netdev->stop(netdev);
+
+ /* Detach; put netif into state similar to hotplug unplug. */
+ netif_poll_enable(netdev);
+ netif_device_detach(netdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e100_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch.
+ */
+static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct nic *nic = netdev_priv(netdev);
+
+ if (pci_enable_device(pdev)) {
+ printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+
+ /* Only one device per card can do a reset */
+ if (0 != PCI_FUNC(pdev->devfn))
+ return PCI_ERS_RESULT_RECOVERED;
+ e100_hw_reset(nic);
+ e100_phy_init(nic);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * e100_io_resume - resume normal operations
+ * @pdev: Pointer to PCI device
+ *
+ * Resume normal operations after an error recovery
+ * sequence has been completed.
+ */
+static void e100_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct nic *nic = netdev_priv(netdev);
+
+ /* ack any pending wake events, disable PME */
+ pci_enable_wake(pdev, 0, 0);
+
+ netif_device_attach(netdev);
+ if (netif_running(netdev)) {
+ e100_open(netdev);
+ mod_timer(&nic->watchdog, jiffies);
+ }
+}
+
+static struct pci_error_handlers e100_err_handler = {
+ .error_detected = e100_io_error_detected,
+ .slot_reset = e100_io_slot_reset,
+ .resume = e100_io_resume,
+};
static struct pci_driver e100_driver = {
.name = DRV_NAME,
@@ -2791,6 +2865,7 @@ static struct pci_driver e100_driver = {
.resume = e100_resume,
#endif
.shutdown = e100_shutdown,
+ .err_handler = &e100_err_handler,
};
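
These hooks tie e100 into the PCI error-recovery core: on a reported channel error the core calls .error_detected (quiesce and request a reset), then .slot_reset after the bus has been reset (re-enable and re-initialize the device), then .resume to restart traffic. A minimal skeleton of the same wiring, with hypothetical mydrv_* names:

    static pci_ers_result_t mydrv_error_detected(struct pci_dev *pdev,
                                                 pci_channel_state_t state)
    {
        netif_device_detach(pci_get_drvdata(pdev));     /* stop touching the device */
        return PCI_ERS_RESULT_NEED_RESET;
    }

    static pci_ers_result_t mydrv_slot_reset(struct pci_dev *pdev)
    {
        if (pci_enable_device(pdev))
            return PCI_ERS_RESULT_DISCONNECT;
        pci_set_master(pdev);
        /* ... re-initialize the hardware ... */
        return PCI_ERS_RESULT_RECOVERED;
    }

    static void mydrv_resume(struct pci_dev *pdev)
    {
        netif_device_attach(pci_get_drvdata(pdev));     /* traffic may flow again */
    }

    static struct pci_error_handlers mydrv_err_handler = {
        .error_detected = mydrv_error_detected,
        .slot_reset     = mydrv_slot_reset,
        .resume         = mydrv_resume,
    };
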
static int __init e100_init_module(void)
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
index ca9f89552da3..5dea2b7dea4d 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/e1000/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
#
-# Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.
+# Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
#
# Contact Information:
# Linux NICS <linux.nics@intel.com>
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
#
################################################################################
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 281de41d030a..2bc34fbfa69c 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
Contact Information:
Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
@@ -114,6 +115,8 @@ struct e1000_adapter;
/* Supported Rx Buffer Sizes */
#define E1000_RXBUFFER_128 128 /* Used for packet split */
#define E1000_RXBUFFER_256 256 /* Used for packet split */
+#define E1000_RXBUFFER_512 512
+#define E1000_RXBUFFER_1024 1024
#define E1000_RXBUFFER_2048 2048
#define E1000_RXBUFFER_4096 4096
#define E1000_RXBUFFER_8192 8192
@@ -334,7 +337,6 @@ struct e1000_adapter {
boolean_t have_msi;
#endif
/* to not mess up cache alignment, always add to the bottom */
- boolean_t txb2b;
#ifdef NETIF_F_TSO
boolean_t tso_force;
#endif
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index ecccca35c6f4..6ed7f599eba3 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
Contact Information:
Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
@@ -864,19 +865,22 @@ static int
e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
{
struct net_device *netdev = adapter->netdev;
- uint32_t mask, i=0, shared_int = TRUE;
- uint32_t irq = adapter->pdev->irq;
+ uint32_t mask, i=0, shared_int = TRUE;
+ uint32_t irq = adapter->pdev->irq;
*data = 0;
/* Hook up test interrupt handler just for this test */
- if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
+ if (!request_irq(irq, &e1000_test_intr, SA_PROBEIRQ, netdev->name,
+ netdev)) {
shared_int = FALSE;
} else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
netdev->name, netdev)){
*data = 1;
return -1;
}
+ DPRINTK(PROBE,INFO, "testing %s interrupt\n",
+ (shared_int ? "shared" : "unshared"));
/* Disable all the interrupts */
E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);
@@ -888,22 +892,22 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
/* Interrupt to test */
mask = 1 << i;
- if (!shared_int) {
- /* Disable the interrupt to be reported in
- * the cause register and then force the same
- * interrupt and see if one gets posted. If
- * an interrupt was posted to the bus, the
- * test failed.
- */
- adapter->test_icr = 0;
- E1000_WRITE_REG(&adapter->hw, IMC, mask);
- E1000_WRITE_REG(&adapter->hw, ICS, mask);
- msec_delay(10);
-
- if (adapter->test_icr & mask) {
- *data = 3;
- break;
- }
+ if (!shared_int) {
+ /* Disable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ E1000_WRITE_REG(&adapter->hw, IMC, mask);
+ E1000_WRITE_REG(&adapter->hw, ICS, mask);
+ msec_delay(10);
+
+ if (adapter->test_icr & mask) {
+ *data = 3;
+ break;
+ }
}
/* Enable the interrupt to be reported in
@@ -922,7 +926,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
break;
}
- if (!shared_int) {
+ if (!shared_int) {
/* Disable the other interrupts to be reported in
* the cause register and then force the other
* interrupts and see if any get posted. If
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 523c2c9fc0ac..3959039b16ec 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
Contact Information:
Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
@@ -764,7 +765,7 @@ e1000_init_hw(struct e1000_hw *hw)
}
if (hw->mac_type == e1000_82573) {
- e1000_enable_tx_pkt_filtering(hw);
+ e1000_enable_tx_pkt_filtering(hw);
}
switch (hw->mac_type) {
@@ -860,7 +861,7 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
if(eeprom_data != EEPROM_RESERVED_WORD) {
/* Adjust SERDES output amplitude only. */
- eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
+ eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
if(ret_val)
return ret_val;
@@ -1227,7 +1228,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
if (hw->phy_reset_disable)
return E1000_SUCCESS;
-
+
ret_val = e1000_phy_reset(hw);
if (ret_val) {
DEBUGOUT("Error Resetting the PHY\n");
@@ -1369,7 +1370,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
DEBUGFUNC("e1000_copper_link_ggp_setup");
if(!hw->phy_reset_disable) {
-
+
/* Enable CRS on TX for half-duplex operation. */
ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
&phy_data);
@@ -1518,7 +1519,7 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
if(hw->phy_reset_disable)
return E1000_SUCCESS;
-
+
/* Enable CRS on TX. This must be set for half-duplex operation. */
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if(ret_val)
@@ -1664,7 +1665,7 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
* collision distance in the Transmit Control Register.
* 2) Set up flow control on the MAC to that established with
* the link partner.
-* 3) Config DSP to improve Gigabit link quality for some PHY revisions.
+* 3) Config DSP to improve Gigabit link quality for some PHY revisions.
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
@@ -1673,7 +1674,7 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
{
int32_t ret_val;
DEBUGFUNC("e1000_copper_link_postconfig");
-
+
if(hw->mac_type >= e1000_82544) {
e1000_config_collision_dist(hw);
} else {
@@ -1697,7 +1698,7 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
return ret_val;
}
}
-
+
return E1000_SUCCESS;
}
@@ -1753,11 +1754,11 @@ e1000_setup_copper_link(struct e1000_hw *hw)
}
if(hw->autoneg) {
- /* Setup autoneg and flow control advertisement
- * and perform autonegotiation */
+ /* Setup autoneg and flow control advertisement
+ * and perform autonegotiation */
ret_val = e1000_copper_link_autoneg(hw);
if(ret_val)
- return ret_val;
+ return ret_val;
} else {
/* PHY will be set to 10H, 10F, 100H,or 100F
* depending on value from forced_speed_duplex. */
@@ -1785,7 +1786,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
ret_val = e1000_copper_link_postconfig(hw);
if(ret_val)
return ret_val;
-
+
DEBUGOUT("Valid link established!!!\n");
return E1000_SUCCESS;
}
@@ -1983,7 +1984,7 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
- ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
if(ret_val)
return ret_val;
@@ -2272,7 +2273,7 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
DEBUGFUNC("e1000_config_mac_to_phy");
- /* 82544 or newer MAC, Auto Speed Detection takes care of
+ /* 82544 or newer MAC, Auto Speed Detection takes care of
* MAC speed/duplex configuration.*/
if (hw->mac_type >= e1000_82544)
return E1000_SUCCESS;
@@ -2291,9 +2292,9 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
if(ret_val)
return ret_val;
- if(phy_data & M88E1000_PSSR_DPLX)
+ if(phy_data & M88E1000_PSSR_DPLX)
ctrl |= E1000_CTRL_FD;
- else
+ else
ctrl &= ~E1000_CTRL_FD;
e1000_config_collision_dist(hw);
@@ -2492,10 +2493,10 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
*/
if(hw->original_fc == e1000_fc_full) {
hw->fc = e1000_fc_full;
- DEBUGOUT("Flow Control = FULL.\r\n");
+ DEBUGOUT("Flow Control = FULL.\n");
} else {
hw->fc = e1000_fc_rx_pause;
- DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
}
}
/* For receiving PAUSE frames ONLY.
@@ -2511,7 +2512,7 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc = e1000_fc_tx_pause;
- DEBUGOUT("Flow Control = TX PAUSE frames only.\r\n");
+ DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
}
/* For transmitting PAUSE frames ONLY.
*
@@ -2526,7 +2527,7 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
!(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc = e1000_fc_rx_pause;
- DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
}
/* Per the IEEE spec, at this point flow control should be
* disabled. However, we want to consider that we could
@@ -2552,10 +2553,10 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
hw->original_fc == e1000_fc_tx_pause) ||
hw->fc_strict_ieee) {
hw->fc = e1000_fc_none;
- DEBUGOUT("Flow Control = NONE.\r\n");
+ DEBUGOUT("Flow Control = NONE.\n");
} else {
hw->fc = e1000_fc_rx_pause;
- DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
}
/* Now we need to do one last check... If we auto-
@@ -2580,7 +2581,7 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
return ret_val;
}
} else {
- DEBUGOUT("Copper PHY and Auto Neg has not completed.\r\n");
+ DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
}
}
return E1000_SUCCESS;
@@ -2763,7 +2764,7 @@ e1000_check_for_link(struct e1000_hw *hw)
hw->autoneg_failed = 1;
return 0;
}
- DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\r\n");
+ DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
/* Disable auto-negotiation in the TXCW register */
E1000_WRITE_REG(hw, TXCW, (hw->txcw & ~E1000_TXCW_ANE));
@@ -2788,7 +2789,7 @@ e1000_check_for_link(struct e1000_hw *hw)
else if(((hw->media_type == e1000_media_type_fiber) ||
(hw->media_type == e1000_media_type_internal_serdes)) &&
(ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
- DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\r\n");
+ DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
E1000_WRITE_REG(hw, TXCW, hw->txcw);
E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -2851,13 +2852,13 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
if(status & E1000_STATUS_FD) {
*duplex = FULL_DUPLEX;
- DEBUGOUT("Full Duplex\r\n");
+ DEBUGOUT("Full Duplex\n");
} else {
*duplex = HALF_DUPLEX;
- DEBUGOUT(" Half Duplex\r\n");
+ DEBUGOUT(" Half Duplex\n");
}
} else {
- DEBUGOUT("1000 Mbs, Full Duplex\r\n");
+ DEBUGOUT("1000 Mbs, Full Duplex\n");
*speed = SPEED_1000;
*duplex = FULL_DUPLEX;
}
@@ -2883,7 +2884,7 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
}
}
- if ((hw->mac_type == e1000_80003es2lan) &&
+ if ((hw->mac_type == e1000_80003es2lan) &&
(hw->media_type == e1000_media_type_copper)) {
if (*speed == SPEED_1000)
ret_val = e1000_configure_kmrn_for_1000(hw);
@@ -3159,7 +3160,7 @@ e1000_read_phy_reg(struct e1000_hw *hw,
if (e1000_swfw_sync_acquire(hw, swfw))
return -E1000_ERR_SWFW_SYNC;
- if((hw->phy_type == e1000_phy_igp ||
+ if((hw->phy_type == e1000_phy_igp ||
hw->phy_type == e1000_phy_igp_2) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
@@ -3298,7 +3299,7 @@ e1000_write_phy_reg(struct e1000_hw *hw,
if (e1000_swfw_sync_acquire(hw, swfw))
return -E1000_ERR_SWFW_SYNC;
- if((hw->phy_type == e1000_phy_igp ||
+ if((hw->phy_type == e1000_phy_igp ||
hw->phy_type == e1000_phy_igp_2) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
@@ -3496,22 +3497,22 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
}
/* Read the device control register and assert the E1000_CTRL_PHY_RST
* bit. Then, take it out of reset.
- * For pre-e1000_82571 hardware, we delay for 10ms between the assert
+ * For pre-e1000_82571 hardware, we delay for 10ms between the assert
* and deassert. For e1000_82571 hardware and later, we instead delay
* for 50us between and 10ms after the deassertion.
*/
ctrl = E1000_READ_REG(hw, CTRL);
E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST);
E1000_WRITE_FLUSH(hw);
-
- if (hw->mac_type < e1000_82571)
+
+ if (hw->mac_type < e1000_82571)
msec_delay(10);
else
udelay(100);
-
+
E1000_WRITE_REG(hw, CTRL, ctrl);
E1000_WRITE_FLUSH(hw);
-
+
if (hw->mac_type >= e1000_82571)
msec_delay(10);
e1000_swfw_sync_release(hw, swfw);
@@ -3815,7 +3816,7 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
/* Check polarity status */
ret_val = e1000_check_polarity(hw, &polarity);
if(ret_val)
- return ret_val;
+ return ret_val;
phy_info->cable_polarity = polarity;
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
@@ -4540,14 +4541,14 @@ e1000_read_eeprom_eerd(struct e1000_hw *hw,
E1000_WRITE_REG(hw, EERD, eerd);
error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);
-
+
if(error) {
break;
}
data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA);
-
+
}
-
+
return error;
}
@@ -4573,24 +4574,24 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw,
return -E1000_ERR_SWFW_SYNC;
for (i = 0; i < words; i++) {
- register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
- ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
+ register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
+ ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
E1000_EEPROM_RW_REG_START;
error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
if(error) {
break;
- }
+ }
E1000_WRITE_REG(hw, EEWR, register_value);
-
+
error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
-
+
if(error) {
break;
- }
+ }
}
-
+
e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
return error;
}
@@ -4610,7 +4611,7 @@ e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
for(i = 0; i < attempts; i++) {
if(eerd == E1000_EEPROM_POLL_READ)
reg = E1000_READ_REG(hw, EERD);
- else
+ else
reg = E1000_READ_REG(hw, EEWR);
if(reg & E1000_EEPROM_RW_REG_DONE) {
@@ -5135,7 +5136,7 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
uint32_t i;
uint32_t num_rar_entry;
uint32_t num_mta_entry;
-
+
DEBUGFUNC("e1000_mc_addr_list_update");
/* Set the new number of MC addresses that we are being requested to use. */
@@ -6240,7 +6241,7 @@ e1000_check_polarity(struct e1000_hw *hw,
* 1 - Downshift ocured.
*
* returns: - E1000_ERR_XXX
- * E1000_SUCCESS
+ * E1000_SUCCESS
*
* For phy's older then IGP, this function reads the Downshift bit in the Phy
* Specific Status register. For IGP phy's, it reads the Downgrade bit in the
@@ -6255,7 +6256,7 @@ e1000_check_downshift(struct e1000_hw *hw)
DEBUGFUNC("e1000_check_downshift");
- if(hw->phy_type == e1000_phy_igp ||
+ if(hw->phy_type == e1000_phy_igp ||
hw->phy_type == e1000_phy_igp_2) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
&phy_data);
@@ -6684,8 +6685,8 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
} else {
-
- phy_data |= IGP02E1000_PM_D0_LPLU;
+
+ phy_data |= IGP02E1000_PM_D0_LPLU;
ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
if (ret_val)
return ret_val;
@@ -6777,7 +6778,7 @@ int32_t
e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer)
{
uint8_t i;
- uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET;
+ uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET;
uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH;
length = (length >> 2);
@@ -6796,7 +6797,7 @@ e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer)
* and also checks whether the previous command is completed.
* It busy waits in case of previous command is not completed.
*
- * returns: - E1000_ERR_HOST_INTERFACE_COMMAND in case if is not ready or
+ * returns: - E1000_ERR_HOST_INTERFACE_COMMAND in case if is not ready or
* timeout
* - E1000_SUCCESS for success.
****************************************************************************/
@@ -6820,7 +6821,7 @@ e1000_mng_enable_host_if(struct e1000_hw * hw)
msec_delay_irq(1);
}
- if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+ if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
DEBUGOUT("Previous command timeout failed .\n");
return -E1000_ERR_HOST_INTERFACE_COMMAND;
}
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 150e45e30f87..467c9ed944f8 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
Contact Information:
Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
@@ -374,7 +375,7 @@ struct e1000_host_mng_dhcp_cookie{
};
#endif
-int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
+int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
uint16_t length);
boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
@@ -1801,7 +1802,7 @@ struct e1000_hw {
* value2 = [0..64512], default=4096
* value3 = [0..64512], default=0
*/
-
+
#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ed15fcaedaf9..a373ccb308d8 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -22,51 +22,13 @@
Contact Information:
Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "e1000.h"
-/* Change Log
- * 7.0.33 3-Feb-2006
- * o Added another fix for the pass false carrier bit
- * 7.0.32 24-Jan-2006
- * o Need to rebuild with noew version number for the pass false carrier
- * fix in e1000_hw.c
- * 7.0.30 18-Jan-2006
- * o fixup for tso workaround to disable it for pci-x
- * o fix mem leak on 82542
- * o fixes for 10 Mb/s connections and incorrect stats
- * 7.0.28 01/06/2006
- * o hardware workaround to only set "speed mode" bit for 1G link.
- * 7.0.26 12/23/2005
- * o wake on lan support modified for device ID 10B5
- * o fix dhcp + vlan issue not making it to the iAMT firmware
- * 7.0.24 12/9/2005
- * o New hardware support for the Gigabit NIC embedded in the south bridge
- * o Fixes to the recycling logic (skb->tail) from IBM LTC
- * 6.3.9 12/16/2005
- * o incorporate fix for recycled skbs from IBM LTC
- * 6.3.7 11/18/2005
- * o Honor eeprom setting for enabling/disabling Wake On Lan
- * 6.3.5 11/17/2005
- * o Fix memory leak in rx ring handling for PCI Express adapters
- * 6.3.4 11/8/05
- * o Patch from Jesper Juhl to remove redundant NULL checks for kfree
- * 6.3.2 9/20/05
- * o Render logic that sets/resets DRV_LOAD as inline functions to
- * avoid code replication. If f/w is AMT then set DRV_LOAD only when
- * network interface is open.
- * o Handle DRV_LOAD set/reset in cases where AMT uses VLANs.
- * o Adjust PBA partioning for Jumbo frames using MTU size and not
- * rx_buffer_len
- * 6.3.1 9/19/05
- * o Use adapter->tx_timeout_factor in Tx Hung Detect logic
- * (e1000_clean_tx_irq)
- * o Support for 8086:10B5 device (Quad Port)
- */
-
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#ifndef CONFIG_E1000_NAPI
@@ -74,9 +36,9 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "7.0.33-k2"DRIVERNAPI
+#define DRV_VERSION "7.0.38-k4"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
-static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
+static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
/* e1000_pci_tbl - PCI Device ID Table
*
@@ -208,8 +170,8 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct net_device *dev);
static void e1000_smartspeed(struct e1000_adapter *adapter);
-static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
- struct sk_buff *skb);
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+ struct sk_buff *skb);
static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
@@ -227,6 +189,16 @@ static void e1000_shutdown(struct pci_dev *pdev);
static void e1000_netpoll (struct net_device *netdev);
#endif
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
+static void e1000_io_resume(struct pci_dev *pdev);
+
+static struct pci_error_handlers e1000_err_handler = {
+ .error_detected = e1000_io_error_detected,
+ .slot_reset = e1000_io_slot_reset,
+ .resume = e1000_io_resume,
+};
static struct pci_driver e1000_driver = {
.name = e1000_driver_name,
@@ -238,7 +210,8 @@ static struct pci_driver e1000_driver = {
.suspend = e1000_suspend,
.resume = e1000_resume,
#endif
- .shutdown = e1000_shutdown
+ .shutdown = e1000_shutdown,
+ .err_handler = &e1000_err_handler
};
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -293,7 +266,7 @@ module_exit(e1000_exit_module);
* @adapter: board private structure
**/
-static inline void
+static void
e1000_irq_disable(struct e1000_adapter *adapter)
{
atomic_inc(&adapter->irq_sem);
@@ -307,7 +280,7 @@ e1000_irq_disable(struct e1000_adapter *adapter)
* @adapter: board private structure
**/
-static inline void
+static void
e1000_irq_enable(struct e1000_adapter *adapter)
{
if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
@@ -348,10 +321,10 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
* For ASF and Pass Through versions of f/w this means that the
* driver is no longer loaded. For AMT version (only with 82573)
* of the f/w this means that the network i/f is closed.
- *
+ *
**/
-static inline void
+static void
e1000_release_hw_control(struct e1000_adapter *adapter)
{
uint32_t ctrl_ext;
@@ -361,6 +334,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
switch (adapter->hw.mac_type) {
case e1000_82571:
case e1000_82572:
+ case e1000_80003es2lan:
ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
@@ -379,13 +353,13 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
* @adapter: address of board private structure
*
* e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that
- * the driver is loaded. For AMT version (only with 82573)
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
* of the f/w this means that the network i/f is open.
- *
+ *
**/
-static inline void
+static void
e1000_get_hw_control(struct e1000_adapter *adapter)
{
uint32_t ctrl_ext;
@@ -394,6 +368,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
switch (adapter->hw.mac_type) {
case e1000_82571:
case e1000_82572:
+ case e1000_80003es2lan:
ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
@@ -421,7 +396,7 @@ e1000_up(struct e1000_adapter *adapter)
uint16_t mii_reg;
e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
if (mii_reg & MII_CR_POWER_DOWN)
- e1000_phy_reset(&adapter->hw);
+ e1000_phy_hw_reset(&adapter->hw);
}
e1000_set_multi(netdev);
@@ -711,8 +686,8 @@ e1000_probe(struct pci_dev *pdev,
DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
/* if ksp3, indicate if it's port a being setup */
- if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
- e1000_ksp3_port_a == 0)
+ if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
+ e1000_ksp3_port_a == 0)
adapter->ksp3_port_a = 1;
e1000_ksp3_port_a++;
/* Reset for multiple KP3 adapters */
@@ -740,9 +715,9 @@ e1000_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- /* hard_start_xmit is safe against parallel locking */
- netdev->features |= NETIF_F_LLTX;
-
+ /* hard_start_xmit is safe against parallel locking */
+ netdev->features |= NETIF_F_LLTX;
+
adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
/* before reading the EEPROM, reset the controller to
@@ -972,8 +947,8 @@ e1000_sw_init(struct e1000_adapter *adapter)
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
- adapter->rx_buffer_len = E1000_RXBUFFER_2048;
- adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
+ adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE;
+ adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
hw->max_frame_size = netdev->mtu +
ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
@@ -1181,7 +1156,7 @@ e1000_close(struct net_device *netdev)
* @start: address of beginning of memory
* @len: length of memory
**/
-static inline boolean_t
+static boolean_t
e1000_check_64k_bound(struct e1000_adapter *adapter,
void *start, unsigned long len)
{
@@ -1599,14 +1574,21 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
rctl |= E1000_RCTL_LPE;
/* Setup buffer sizes */
- if (adapter->hw.mac_type >= e1000_82571) {
- /* We can now specify buffers in 1K increments.
- * BSIZE and BSEX are ignored in this case. */
- rctl |= adapter->rx_buffer_len << 0x11;
- } else {
- rctl &= ~E1000_RCTL_SZ_4096;
- rctl |= E1000_RCTL_BSEX;
- switch (adapter->rx_buffer_len) {
+ rctl &= ~E1000_RCTL_SZ_4096;
+ rctl |= E1000_RCTL_BSEX;
+ switch (adapter->rx_buffer_len) {
+ case E1000_RXBUFFER_256:
+ rctl |= E1000_RCTL_SZ_256;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_512:
+ rctl |= E1000_RCTL_SZ_512;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_1024:
+ rctl |= E1000_RCTL_SZ_1024;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
case E1000_RXBUFFER_2048:
default:
rctl |= E1000_RCTL_SZ_2048;
@@ -1621,7 +1603,6 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
case E1000_RXBUFFER_16384:
rctl |= E1000_RCTL_SZ_16384;
break;
- }
}
#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
@@ -1715,7 +1696,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
if (hw->mac_type >= e1000_82571) {
ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
/* Reset delay timers after every interrupt */
- ctrl_ext |= E1000_CTRL_EXT_CANC;
+ ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
#ifdef CONFIG_E1000_NAPI
/* Auto-Mask interrupts upon ICR read. */
ctrl_ext |= E1000_CTRL_EXT_IAME;
@@ -1807,7 +1788,7 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter)
e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}
-static inline void
+static void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info)
{
@@ -2247,6 +2228,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
if (link) {
if (!netif_carrier_ok(netdev)) {
+ boolean_t txb2b = 1;
e1000_get_speed_and_duplex(&adapter->hw,
&adapter->link_speed,
&adapter->link_duplex);
@@ -2260,23 +2242,22 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
* and adjust the timeout factor */
netdev->tx_queue_len = adapter->tx_queue_len;
adapter->tx_timeout_factor = 1;
- adapter->txb2b = 1;
switch (adapter->link_speed) {
case SPEED_10:
- adapter->txb2b = 0;
+ txb2b = 0;
netdev->tx_queue_len = 10;
adapter->tx_timeout_factor = 8;
break;
case SPEED_100:
- adapter->txb2b = 0;
+ txb2b = 0;
netdev->tx_queue_len = 100;
/* maybe add some timeout factor ? */
break;
}
- if ((adapter->hw.mac_type == e1000_82571 ||
+ if ((adapter->hw.mac_type == e1000_82571 ||
adapter->hw.mac_type == e1000_82572) &&
- adapter->txb2b == 0) {
+ txb2b == 0) {
#define SPEED_MODE_BIT (1 << 21)
uint32_t tarc0;
tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
@@ -2400,7 +2381,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT 16
-static inline int
+static int
e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
struct sk_buff *skb)
{
@@ -2422,7 +2403,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
mss = skb_shinfo(skb)->tso_size;
- if (skb->protocol == ntohs(ETH_P_IP)) {
+ if (skb->protocol == htons(ETH_P_IP)) {
skb->nh.iph->tot_len = 0;
skb->nh.iph->check = 0;
skb->h.th->check =
@@ -2480,7 +2461,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
return FALSE;
}
-static inline boolean_t
+static boolean_t
e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
struct sk_buff *skb)
{
@@ -2516,7 +2497,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
#define E1000_MAX_TXD_PWR 12
#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
-static inline int
+static int
e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
unsigned int nr_frags, unsigned int mss)
@@ -2625,7 +2606,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
return count;
}
-static inline void
+static void
e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
int tx_flags, int count)
{
@@ -2689,7 +2670,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
#define E1000_FIFO_HDR 0x10
#define E1000_82547_PAD_LEN 0x3E0
-static inline int
+static int
e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
{
uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
@@ -2716,7 +2697,7 @@ no_fifo_stall_required:
}
#define MINIMUM_DHCP_PACKET_SIZE 282
-static inline int
+static int
e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
{
struct e1000_hw *hw = &adapter->hw;
@@ -2764,7 +2745,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int nr_frags = 0;
unsigned int mss = 0;
int count = 0;
- int tso;
+ int tso;
unsigned int f;
len -= skb->data_len;
@@ -2777,7 +2758,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
#ifdef NETIF_F_TSO
mss = skb_shinfo(skb)->tso_size;
- /* The controller does a simple calculation to
+ /* The controller does a simple calculation to
* make sure there is enough room in the FIFO before
* initiating the DMA for each buffer. The calc is:
* 4 = ceil(buffer len/mss). To make sure we don't
@@ -2800,7 +2781,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
case e1000_82573:
pull_size = min((unsigned int)4, skb->data_len);
if (!__pskb_pull_tail(skb, pull_size)) {
- printk(KERN_ERR
+ printk(KERN_ERR
"__pskb_pull_tail failed.\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2901,7 +2882,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* Old method was to assume IPv4 packet by default if TSO was enabled.
* 82571 hardware supports TSO capabilities for IPv6 as well...
* no longer assume, we must. */
- if (likely(skb->protocol == ntohs(ETH_P_IP)))
+ if (likely(skb->protocol == htons(ETH_P_IP)))
tx_flags |= E1000_TX_FLAGS_IPV4;
e1000_tx_queue(adapter, tx_ring, tx_flags,
@@ -2982,8 +2963,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
/* Adapter-specific max frame size limits. */
switch (adapter->hw.mac_type) {
- case e1000_82542_rev2_0:
- case e1000_82542_rev2_1:
+ case e1000_undefined ... e1000_82542_rev2_1:
if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
return -EINVAL;
@@ -3017,27 +2997,32 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
break;
}
-
- if (adapter->hw.mac_type > e1000_82547_rev_2) {
- adapter->rx_buffer_len = max_frame;
- E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
- } else {
- if(unlikely((adapter->hw.mac_type < e1000_82543) &&
- (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
- DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
- "on 82542\n");
- return -EINVAL;
- } else {
- if(max_frame <= E1000_RXBUFFER_2048)
- adapter->rx_buffer_len = E1000_RXBUFFER_2048;
- else if(max_frame <= E1000_RXBUFFER_4096)
- adapter->rx_buffer_len = E1000_RXBUFFER_4096;
- else if(max_frame <= E1000_RXBUFFER_8192)
- adapter->rx_buffer_len = E1000_RXBUFFER_8192;
- else if(max_frame <= E1000_RXBUFFER_16384)
- adapter->rx_buffer_len = E1000_RXBUFFER_16384;
- }
- }
+ /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ * means we reserve 2 more; this pushes us to allocate from the next
+ * larger slab size
+ * i.e. RXBUFFER_2048 --> size-4096 slab */
+
+ if (max_frame <= E1000_RXBUFFER_256)
+ adapter->rx_buffer_len = E1000_RXBUFFER_256;
+ else if (max_frame <= E1000_RXBUFFER_512)
+ adapter->rx_buffer_len = E1000_RXBUFFER_512;
+ else if (max_frame <= E1000_RXBUFFER_1024)
+ adapter->rx_buffer_len = E1000_RXBUFFER_1024;
+ else if (max_frame <= E1000_RXBUFFER_2048)
+ adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+ else if (max_frame <= E1000_RXBUFFER_4096)
+ adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+ else if (max_frame <= E1000_RXBUFFER_8192)
+ adapter->rx_buffer_len = E1000_RXBUFFER_8192;
+ else if (max_frame <= E1000_RXBUFFER_16384)
+ adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+
+ /* adjust allocation if LPE protects us, and we aren't using SBP */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+ if (!adapter->hw.tbi_compatibility_on &&
+ ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
+ (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
+ adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
netdev->mtu = new_mtu;
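
A quick standalone illustration of the bucket selection and the slab effect described in the comment above. This is a sketch only, not driver code: the bucket values mirror the E1000_RXBUFFER_* constants used in the hunk, and the 16-byte dev_alloc_skb reserve plus the 2-byte NET_IP_ALIGN come from the hunk's own note.

#include <stdio.h>

/* Buckets named after the driver's E1000_RXBUFFER_* constants. */
static const unsigned int buckets[] = { 256, 512, 1024, 2048, 4096, 8192, 16384 };

/* Smallest bucket that holds max_frame, mirroring the if/else ladder above. */
static unsigned int pick_rx_buffer_len(unsigned int max_frame)
{
	size_t i;

	for (i = 0; i < sizeof(buckets) / sizeof(buckets[0]); i++)
		if (max_frame <= buckets[i])
			return buckets[i];
	return buckets[sizeof(buckets) / sizeof(buckets[0]) - 1];
}

int main(void)
{
	unsigned int max_frame = 1518;	/* 1500-byte MTU + 14-byte header + 4-byte FCS */
	unsigned int len = pick_rx_buffer_len(max_frame);

	/* dev_alloc_skb reserves 16 bytes and NET_IP_ALIGN adds 2 more, so a
	 * 2048-byte buffer asks the allocator for more than 2048 bytes and is
	 * rounded up to the size-4096 slab (the hunk's own example); that is
	 * why the code falls back to 1522 bytes when LPE already guards
	 * against oversized frames. */
	printf("bucket %u, allocator sees at least %u bytes\n", len, len + 16 + 2);
	return 0;
}
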
@@ -3060,11 +3045,21 @@ void
e1000_update_stats(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
unsigned long flags;
uint16_t phy_tmp;
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
+ /*
+ * Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if (adapter->link_speed == 0)
+ return;
+ if (pdev->error_state && pdev->error_state != pci_channel_io_normal)
+ return;
+
spin_lock_irqsave(&adapter->stats_lock, flags);
/* these counters are modified from e1000_adjust_tbi_stats,
@@ -3165,7 +3160,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc +
adapter->stats.cexterr;
- adapter->net_stats.rx_dropped = 0;
adapter->net_stats.rx_length_errors = adapter->stats.ruc +
adapter->stats.roc;
adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
@@ -3391,13 +3385,15 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
tx_ring->next_to_clean = i;
- spin_lock(&tx_ring->tx_lock);
-
+#define TX_WAKE_THRESHOLD 32
if (unlikely(cleaned && netif_queue_stopped(netdev) &&
- netif_carrier_ok(netdev)))
- netif_wake_queue(netdev);
-
- spin_unlock(&tx_ring->tx_lock);
+ netif_carrier_ok(netdev))) {
+ spin_lock(&tx_ring->tx_lock);
+ if (netif_queue_stopped(netdev) &&
+ (E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))
+ netif_wake_queue(netdev);
+ spin_unlock(&tx_ring->tx_lock);
+ }
if (adapter->detect_tx_hung) {
/* Detect a transmit hang in hardware, this serializes the
@@ -3445,7 +3441,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
* @sk_buff: socket buffer with received data
**/
-static inline void
+static void
e1000_rx_checksum(struct e1000_adapter *adapter,
uint32_t status_err, uint32_t csum,
struct sk_buff *skb)
@@ -3519,7 +3515,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
buffer_info = &rx_ring->buffer_info[i];
while (rx_desc->status & E1000_RXD_STAT_DD) {
- struct sk_buff *skb, *next_skb;
+ struct sk_buff *skb;
u8 status;
#ifdef CONFIG_E1000_NAPI
if (*work_done >= work_to_do)
@@ -3537,8 +3533,6 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
prefetch(next_rxd);
next_buffer = &rx_ring->buffer_info[i];
- next_skb = next_buffer->skb;
- prefetch(next_skb->data - NET_IP_ALIGN);
cleaned = TRUE;
cleaned_count++;
@@ -3569,7 +3563,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
flags);
length--;
} else {
- dev_kfree_skb_irq(skb);
+ /* recycle */
+ buffer_info->skb = skb;
goto next_desc;
}
}
@@ -3668,7 +3663,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info, *next_buffer;
struct e1000_ps_page *ps_page;
struct e1000_ps_page_dma *ps_page_dma;
- struct sk_buff *skb, *next_skb;
+ struct sk_buff *skb;
unsigned int i, j;
uint32_t length, staterr;
int cleaned_count = 0;
@@ -3677,6 +3672,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+ buffer_info = &rx_ring->buffer_info[i];
while (staterr & E1000_RXD_STAT_DD) {
buffer_info = &rx_ring->buffer_info[i];
@@ -3697,8 +3693,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
prefetch(next_rxd);
next_buffer = &rx_ring->buffer_info[i];
- next_skb = next_buffer->skb;
- prefetch(next_skb->data - NET_IP_ALIGN);
cleaned = TRUE;
cleaned_count++;
@@ -3737,9 +3731,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
/* page alloc/put takes too long and affects small packet
* throughput, so unsplit small packets and save the alloc/put*/
- if (l1 && ((length + l1) < E1000_CB_LENGTH)) {
+ if (l1 && ((length + l1) <= adapter->rx_ps_bsize0)) {
u8 *vaddr;
- /* there is no documentation about how to call
+ /* there is no documentation about how to call
* kmap_atomic, so we can't hold the mapping
* very long */
pci_dma_sync_single_for_cpu(pdev,
@@ -4159,7 +4153,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
spin_unlock_irqrestore(&adapter->stats_lock, flags);
return -EIO;
}
- if (adapter->hw.phy_type == e1000_media_type_copper) {
+ if (adapter->hw.media_type == e1000_media_type_copper) {
switch (data->reg_num) {
case PHY_CTRL:
if (mii_reg & MII_CR_POWER_DOWN)
@@ -4518,21 +4512,13 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
- retval = pci_enable_wake(pdev, PCI_D3hot, 1);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
- retval = pci_enable_wake(pdev, PCI_D3cold, 1);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_enable_wake(pdev, PCI_D3cold, 1);
} else {
E1000_WRITE_REG(&adapter->hw, WUC, 0);
E1000_WRITE_REG(&adapter->hw, WUFC, 0);
- retval = pci_enable_wake(pdev, PCI_D3hot, 0);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
- retval = pci_enable_wake(pdev, PCI_D3cold, 0);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
}
if (adapter->hw.mac_type >= e1000_82540 &&
@@ -4541,13 +4527,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
if (manc & E1000_MANC_SMBUS_EN) {
manc |= E1000_MANC_ARP_EN;
E1000_WRITE_REG(&adapter->hw, MANC, manc);
- retval = pci_enable_wake(pdev, PCI_D3hot, 1);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
- retval = pci_enable_wake(pdev, PCI_D3cold, 1);
- if (retval)
- DPRINTK(PROBE, ERR,
- "Error enabling D3 cold wake\n");
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_enable_wake(pdev, PCI_D3cold, 1);
}
}
@@ -4557,9 +4538,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
pci_disable_device(pdev);
- retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
- if (retval)
- DPRINTK(PROBE, ERR, "Error in setting power state\n");
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
@@ -4570,22 +4549,15 @@ e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- int retval;
uint32_t manc, ret_val;
- retval = pci_set_power_state(pdev, PCI_D0);
- if (retval)
- DPRINTK(PROBE, ERR, "Error in setting power state\n");
+ pci_set_power_state(pdev, PCI_D0);
e1000_pci_restore_state(adapter);
ret_val = pci_enable_device(pdev);
pci_set_master(pdev);
- retval = pci_enable_wake(pdev, PCI_D3hot, 0);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
- retval = pci_enable_wake(pdev, PCI_D3cold, 0);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
e1000_reset(adapter);
E1000_WRITE_REG(&adapter->hw, WUS, ~0);
@@ -4639,4 +4611,101 @@ e1000_netpoll(struct net_device *netdev)
}
#endif
+/**
+ * e1000_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev->priv;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ e1000_down(adapter);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the e1000_resume routine.
+ */
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev->priv;
+
+ if (pci_enable_device(pdev)) {
+ printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, 3, 0);
+ pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
+
+ /* Perform card reset only on one instance of the card */
+ if (PCI_FUNC (pdev->devfn) != 0)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ e1000_reset(adapter);
+ E1000_WRITE_REG(&adapter->hw, WUS, ~0);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * e1000_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the e1000_resume routine.
+ */
+static void e1000_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev->priv;
+ uint32_t manc, swsm;
+
+ if (netif_running(netdev)) {
+ if (e1000_up(adapter)) {
+ printk("e1000: can't bring device back up after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+
+ if (adapter->hw.mac_type >= e1000_82540 &&
+ adapter->hw.media_type == e1000_media_type_copper) {
+ manc = E1000_READ_REG(&adapter->hw, MANC);
+ manc &= ~(E1000_MANC_ARP_EN);
+ E1000_WRITE_REG(&adapter->hw, MANC, manc);
+ }
+
+ switch (adapter->hw.mac_type) {
+ case e1000_82573:
+ swsm = E1000_READ_REG(&adapter->hw, SWSM);
+ E1000_WRITE_REG(&adapter->hw, SWSM,
+ swsm | E1000_SWSM_DRV_LOAD);
+ break;
+ default:
+ break;
+ }
+
+ if (netif_running(netdev))
+ mod_timer(&adapter->watchdog_timer, jiffies);
+}
+
/* e1000_main.c */
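
For readers following the new error-recovery hooks above, here is a minimal sketch of how the three callbacks fit together. The types, return codes, and the pci_enable_device/pci_set_master calls are the ones used in the hunk itself; the demo_* names and placeholder bodies are illustrative, not the e1000 implementation.

#include <linux/pci.h>

/* 1. error_detected: the PCI core saw a bus error; quiesce I/O and ask
 *    for a slot reset. */
static pci_ers_result_t demo_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	/* detach the netdev and bring the interface down here */
	return PCI_ERS_RESULT_NEED_RESET;
}

/* 2. slot_reset: the slot has been reset; re-enable the device and
 *    reinitialize it, reporting whether recovery can continue. */
static pci_ers_result_t demo_io_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_set_master(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

/* 3. resume: recovery is complete; restart traffic. */
static void demo_io_resume(struct pci_dev *pdev)
{
	/* bring the interface back up and re-attach the netdev here */
}

static struct pci_error_handlers demo_err_handler = {
	.error_detected	= demo_io_error_detected,
	.slot_reset	= demo_io_slot_reset,
	.resume		= demo_io_resume,
};
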
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 9790db974dc1..048d052be29d 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
Contact Information:
Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index e0a4d37d1b85..e55f8969a0fb 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
Contact Information:
Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 2f7b86837fe8..8d680ce600d7 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -21,15 +21,15 @@
http://www.scyld.com/network/epic100.html
---------------------------------------------------------------------
-
+
Linux kernel-specific changes:
-
+
LK1.1.2 (jgarzik):
* Merge becker version 1.09 (4/08/2000)
LK1.1.3:
* Major bugfix to 1.09 driver (Francis Romieu)
-
+
LK1.1.4 (jgarzik):
* Merge becker test version 1.09 (5/29/2000)
@@ -66,7 +66,7 @@
LK1.1.14 (Kryzsztof Halasa):
* fix spurious bad initializations
* pound phy a la SMSC's app note on the subject
-
+
AC1.1.14ac
* fix power up/down for ethtool that broke in 1.11
@@ -244,7 +244,7 @@ static struct pci_device_id epic_pci_tbl[] = {
};
MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
-
+
#ifndef USE_IO_OPS
#undef inb
#undef inw
@@ -370,7 +370,7 @@ static int epic_close(struct net_device *dev);
static struct net_device_stats *epic_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
-
+
static int __devinit epic_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
@@ -392,9 +392,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
version, version2, version3);
#endif
-
+
card_idx++;
-
+
ret = pci_enable_device(pdev);
if (ret)
goto out;
@@ -405,7 +405,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
ret = -ENODEV;
goto err_out_disable;
}
-
+
pci_set_master(pdev);
ret = pci_request_regions(pdev, DRV_NAME);
@@ -498,7 +498,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
ep->pci_dev = pdev;
ep->chip_id = chip_idx;
ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
- ep->irq_mask =
+ ep->irq_mask =
(ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
| CntFull | TxUnderrun | EpicNapiEvent;
@@ -587,7 +587,7 @@ err_out_disable:
pci_disable_device(pdev);
goto out;
}
-
+
/* Serial EEPROM section. */
/* EEPROM_Ctrl bits. */
@@ -709,7 +709,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
outw(value, ioaddr + MIIData);
outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
- for (i = 10000; i > 0; i--) {
+ for (i = 10000; i > 0; i--) {
barrier();
if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
break;
@@ -717,7 +717,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
return;
}
-
+
static int epic_open(struct net_device *dev)
{
struct epic_private *ep = dev->priv;
@@ -760,7 +760,7 @@ static int epic_open(struct net_device *dev)
#endif
udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
-
+
for (i = 0; i < 3; i++)
outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
@@ -803,7 +803,7 @@ static int epic_open(struct net_device *dev)
/* Enable interrupts by setting the interrupt mask. */
outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
- | CntFull | TxUnderrun
+ | CntFull | TxUnderrun
| RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
if (debug > 1)
@@ -831,7 +831,7 @@ static void epic_pause(struct net_device *dev)
struct epic_private *ep = dev->priv;
netif_stop_queue (dev);
-
+
/* Disable interrupts by clearing the interrupt mask. */
outl(0x00000000, ioaddr + INTMASK);
/* Stop the chip's Tx and Rx DMA processes. */
@@ -987,7 +987,7 @@ static void epic_init_ring(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
ep->rx_ring[i].rxstatus = 0;
ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
- ep->rx_ring[i].next = ep->rx_ring_dma +
+ ep->rx_ring[i].next = ep->rx_ring_dma +
(i+1)*sizeof(struct epic_rx_desc);
ep->rx_skbuff[i] = NULL;
}
@@ -1002,7 +1002,7 @@ static void epic_init_ring(struct net_device *dev)
break;
skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* 16 byte align the IP header. */
- ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
+ ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
}
@@ -1013,7 +1013,7 @@ static void epic_init_ring(struct net_device *dev)
for (i = 0; i < TX_RING_SIZE; i++) {
ep->tx_skbuff[i] = NULL;
ep->tx_ring[i].txstatus = 0x0000;
- ep->tx_ring[i].next = ep->tx_ring_dma +
+ ep->tx_ring[i].next = ep->tx_ring_dma +
(i+1)*sizeof(struct epic_tx_desc);
}
ep->tx_ring[i-1].next = ep->tx_ring_dma;
@@ -1026,7 +1026,7 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
int entry, free_count;
u32 ctrl_word;
unsigned long flags;
-
+
if (skb->len < ETH_ZLEN) {
skb = skb_padto(skb, ETH_ZLEN);
if (skb == NULL)
@@ -1042,7 +1042,7 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
entry = ep->cur_tx % TX_RING_SIZE;
ep->tx_skbuff[entry] = skb;
- ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
+ ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
skb->len, PCI_DMA_TODEVICE);
if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
@@ -1126,7 +1126,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
/* Free the original skb. */
skb = ep->tx_skbuff[entry];
- pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
+ pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
ep->tx_skbuff[entry] = NULL;
@@ -1281,8 +1281,8 @@ static int epic_rx(struct net_device *dev, int budget)
ep->rx_buf_sz,
PCI_DMA_FROMDEVICE);
} else {
- pci_unmap_single(ep->pci_dev,
- ep->rx_ring[entry].bufaddr,
+ pci_unmap_single(ep->pci_dev,
+ ep->rx_ring[entry].bufaddr,
ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb_put(skb = ep->rx_skbuff[entry], pkt_len);
ep->rx_skbuff[entry] = NULL;
@@ -1307,7 +1307,7 @@ static int epic_rx(struct net_device *dev, int budget)
break;
skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
+ ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
work_done++;
}
@@ -1403,7 +1403,7 @@ static int epic_close(struct net_device *dev)
ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
ep->rx_ring[i].buflength = 0;
if (skb) {
- pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
+ pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
}
@@ -1414,7 +1414,7 @@ static int epic_close(struct net_device *dev)
ep->tx_skbuff[i] = NULL;
if (!skb)
continue;
- pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
+ pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
}
@@ -1607,7 +1607,7 @@ static void __devexit epic_remove_one (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct epic_private *ep = dev->priv;
-
+
pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
unregister_netdev(dev);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 705e1229d89d..4ab39c554d0d 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -107,6 +107,8 @@
* 0.52: 20 Jan 2006: Add MSI/MSIX support.
* 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
* 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
+ * 0.55: 22 Mar 2006: Add flow control (pause frame).
+ * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@@ -118,7 +120,7 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
-#define FORCEDETH_VERSION "0.54"
+#define FORCEDETH_VERSION "0.56"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
@@ -163,6 +165,9 @@
#define DEV_HAS_MSI 0x0040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */
+#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
+#define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */
+#define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */
enum {
NvRegIrqStatus = 0x000,
@@ -203,6 +208,7 @@ enum {
NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
NvRegMisc1 = 0x080,
+#define NVREG_MISC1_PAUSE_TX 0x01
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c
@@ -214,9 +220,11 @@ enum {
#define NVREG_XMITSTAT_BUSY 0x01
NvRegPacketFilterFlags = 0x8c,
-#define NVREG_PFF_ALWAYS 0x7F0008
+#define NVREG_PFF_PAUSE_RX 0x08
+#define NVREG_PFF_ALWAYS 0x7F0000
#define NVREG_PFF_PROMISC 0x80
#define NVREG_PFF_MYADDR 0x20
+#define NVREG_PFF_LOOPBACK 0x10
NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY 0x601
@@ -277,6 +285,9 @@ enum {
#define NVREG_TXRXCTL_VLANINS 0x00080
NvRegTxRingPhysAddrHigh = 0x148,
NvRegRxRingPhysAddrHigh = 0x14C,
+ NvRegTxPauseFrame = 0x170,
+#define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080
+#define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030
NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
@@ -326,6 +337,33 @@ enum {
#define NVREG_POWERSTATE_D1 0x0001
#define NVREG_POWERSTATE_D2 0x0002
#define NVREG_POWERSTATE_D3 0x0003
+ NvRegTxCnt = 0x280,
+ NvRegTxZeroReXmt = 0x284,
+ NvRegTxOneReXmt = 0x288,
+ NvRegTxManyReXmt = 0x28c,
+ NvRegTxLateCol = 0x290,
+ NvRegTxUnderflow = 0x294,
+ NvRegTxLossCarrier = 0x298,
+ NvRegTxExcessDef = 0x29c,
+ NvRegTxRetryErr = 0x2a0,
+ NvRegRxFrameErr = 0x2a4,
+ NvRegRxExtraByte = 0x2a8,
+ NvRegRxLateCol = 0x2ac,
+ NvRegRxRunt = 0x2b0,
+ NvRegRxFrameTooLong = 0x2b4,
+ NvRegRxOverflow = 0x2b8,
+ NvRegRxFCSErr = 0x2bc,
+ NvRegRxFrameAlignErr = 0x2c0,
+ NvRegRxLenErr = 0x2c4,
+ NvRegRxUnicast = 0x2c8,
+ NvRegRxMulticast = 0x2cc,
+ NvRegRxBroadcast = 0x2d0,
+ NvRegTxDef = 0x2d4,
+ NvRegTxFrame = 0x2d8,
+ NvRegRxCnt = 0x2dc,
+ NvRegTxPause = 0x2e0,
+ NvRegRxPause = 0x2e4,
+ NvRegRxDropFrame = 0x2e8,
NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE 0x2000
NvRegMSIXMap0 = 0x3e0,
@@ -449,16 +487,18 @@ typedef union _ring_type {
/* General driver defaults */
#define NV_WATCHDOG_TIMEO (5*HZ)
-#define RX_RING 128
-#define TX_RING 256
-/*
- * If your nic mysteriously hangs then try to reduce the limits
- * to 1/0: It might be required to set NV_TX_LASTPACKET in the
- * last valid ring entry. But this would be impossible to
- * implement - probably a disassembly error.
+#define RX_RING_DEFAULT 128
+#define TX_RING_DEFAULT 256
+#define RX_RING_MIN 128
+#define TX_RING_MIN 64
+#define RING_MAX_DESC_VER_1 1024
+#define RING_MAX_DESC_VER_2_3 16384
+/*
+ * Difference between the get and put pointers for the tx ring.
+ * This is used to throttle the amount of data outstanding in the
+ * tx ring.
*/
-#define TX_LIMIT_STOP 255
-#define TX_LIMIT_START 254
+#define TX_LIMIT_DIFFERENCE 1
/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS (64)
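
The throttling mentioned in the new comment works on the distance between the producer index (next_tx) and the consumer index (nic_tx). A standalone sketch of that occupancy check follows; it is illustrative only, since how tx_limit_start and tx_limit_stop are derived from TX_LIMIT_DIFFERENCE is not shown in this hunk, so the ring_size - 1 threshold below is an assumption.

#include <stdio.h>

/* Free-running unsigned counters; their difference is the number of
 * descriptors currently outstanding, even across wraparound. */
struct tx_ring_state {
	unsigned int next_tx;	/* put pointer: next descriptor to fill */
	unsigned int nic_tx;	/* get pointer: next descriptor the NIC finishes */
	unsigned int ring_size;	/* e.g. TX_RING_DEFAULT == 256 */
};

static unsigned int tx_outstanding(const struct tx_ring_state *r)
{
	return r->next_tx - r->nic_tx;	/* modular arithmetic handles wrap */
}

/* Stop queueing new packets once (almost) the whole ring is in flight. */
static int tx_ring_full(const struct tx_ring_state *r, unsigned int entries)
{
	return tx_outstanding(r) + entries > r->ring_size - 1;
}

int main(void)
{
	struct tx_ring_state r = { .next_tx = 300, .nic_tx = 60, .ring_size = 256 };

	printf("outstanding=%u full=%d\n", tx_outstanding(&r), tx_ring_full(&r, 4));
	return 0;
}
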
@@ -472,8 +512,9 @@ typedef union _ring_type {
#define OOM_REFILL (1+HZ/20)
#define POLL_WAIT (1+HZ/100)
#define LINK_TIMEOUT (3*HZ)
+#define STATS_INTERVAL (10*HZ)
-/*
+/*
* desc_ver values:
* The nic supports three different descriptor types:
* - DESC_VER_1: Original
@@ -506,13 +547,13 @@ typedef union _ring_type {
#define PHY_1000 0x2
#define PHY_HALF 0x100
-/* FIXME: MII defines that should be added to <linux/mii.h> */
-#define MII_1000BT_CR 0x09
-#define MII_1000BT_SR 0x0a
-#define ADVERTISE_1000FULL 0x0200
-#define ADVERTISE_1000HALF 0x0100
-#define LPA_1000FULL 0x0800
-#define LPA_1000HALF 0x0400
+#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
+#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
+#define NV_PAUSEFRAME_RX_ENABLE 0x0004
+#define NV_PAUSEFRAME_TX_ENABLE 0x0008
+#define NV_PAUSEFRAME_RX_REQ 0x0010
+#define NV_PAUSEFRAME_TX_REQ 0x0020
+#define NV_PAUSEFRAME_AUTONEG 0x0040
/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS 8
@@ -527,6 +568,101 @@ typedef union _ring_type {
#define NV_MSI_X_VECTOR_TX 0x1
#define NV_MSI_X_VECTOR_OTHER 0x2
+/* statistics */
+struct nv_ethtool_str {
+ char name[ETH_GSTRING_LEN];
+};
+
+static const struct nv_ethtool_str nv_estats_str[] = {
+ { "tx_bytes" },
+ { "tx_zero_rexmt" },
+ { "tx_one_rexmt" },
+ { "tx_many_rexmt" },
+ { "tx_late_collision" },
+ { "tx_fifo_errors" },
+ { "tx_carrier_errors" },
+ { "tx_excess_deferral" },
+ { "tx_retry_error" },
+ { "tx_deferral" },
+ { "tx_packets" },
+ { "tx_pause" },
+ { "rx_frame_error" },
+ { "rx_extra_byte" },
+ { "rx_late_collision" },
+ { "rx_runt" },
+ { "rx_frame_too_long" },
+ { "rx_over_errors" },
+ { "rx_crc_errors" },
+ { "rx_frame_align_error" },
+ { "rx_length_error" },
+ { "rx_unicast" },
+ { "rx_multicast" },
+ { "rx_broadcast" },
+ { "rx_bytes" },
+ { "rx_pause" },
+ { "rx_drop_frame" },
+ { "rx_packets" },
+ { "rx_errors_total" }
+};
+
+struct nv_ethtool_stats {
+ u64 tx_bytes;
+ u64 tx_zero_rexmt;
+ u64 tx_one_rexmt;
+ u64 tx_many_rexmt;
+ u64 tx_late_collision;
+ u64 tx_fifo_errors;
+ u64 tx_carrier_errors;
+ u64 tx_excess_deferral;
+ u64 tx_retry_error;
+ u64 tx_deferral;
+ u64 tx_packets;
+ u64 tx_pause;
+ u64 rx_frame_error;
+ u64 rx_extra_byte;
+ u64 rx_late_collision;
+ u64 rx_runt;
+ u64 rx_frame_too_long;
+ u64 rx_over_errors;
+ u64 rx_crc_errors;
+ u64 rx_frame_align_error;
+ u64 rx_length_error;
+ u64 rx_unicast;
+ u64 rx_multicast;
+ u64 rx_broadcast;
+ u64 rx_bytes;
+ u64 rx_pause;
+ u64 rx_drop_frame;
+ u64 rx_packets;
+ u64 rx_errors_total;
+};
+
+/* diagnostics */
+#define NV_TEST_COUNT_BASE 3
+#define NV_TEST_COUNT_EXTENDED 4
+
+static const struct nv_ethtool_str nv_etests_str[] = {
+ { "link (online/offline)" },
+ { "register (offline) " },
+ { "interrupt (offline) " },
+ { "loopback (offline) " }
+};
+
+struct register_test {
+ u32 reg;
+ u32 mask;
+};
+
+static const struct register_test nv_registers_test[] = {
+ { NvRegUnknownSetupReg6, 0x01 },
+ { NvRegMisc1, 0x03c },
+ { NvRegOffloadConfig, 0x03ff },
+ { NvRegMulticastAddrA, 0xffffffff },
+ { NvRegUnknownSetupReg3, 0x0ff },
+ { NvRegWakeUpFlags, 0x07777 },
+ { 0,0 }
+};
+
/*
* SMP locking:
* All hardware access under dev->priv->lock, except the performance
@@ -545,6 +681,7 @@ struct fe_priv {
/* General data:
* Locking: spin_lock(&np->lock); */
struct net_device_stats stats;
+ struct nv_ethtool_stats estats;
int in_shutdown;
u32 linkspeed;
int duplex;
@@ -554,6 +691,7 @@ struct fe_priv {
int wolenabled;
unsigned int phy_oui;
u16 gigabit;
+ int intr_test;
/* General data: RO fields */
dma_addr_t ring_addr;
@@ -573,13 +711,15 @@ struct fe_priv {
*/
ring_type rx_ring;
unsigned int cur_rx, refill_rx;
- struct sk_buff *rx_skbuff[RX_RING];
- dma_addr_t rx_dma[RX_RING];
+ struct sk_buff **rx_skbuff;
+ dma_addr_t *rx_dma;
unsigned int rx_buf_sz;
unsigned int pkt_limit;
struct timer_list oom_kick;
struct timer_list nic_poll;
+ struct timer_list stats_poll;
u32 nic_poll_irq;
+ int rx_ring_size;
/* media detection workaround.
* Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
@@ -591,10 +731,13 @@ struct fe_priv {
*/
ring_type tx_ring;
unsigned int next_tx, nic_tx;
- struct sk_buff *tx_skbuff[TX_RING];
- dma_addr_t tx_dma[TX_RING];
- unsigned int tx_dma_len[TX_RING];
+ struct sk_buff **tx_skbuff;
+ dma_addr_t *tx_dma;
+ unsigned int *tx_dma_len;
u32 tx_flags;
+ int tx_ring_size;
+ int tx_limit_start;
+ int tx_limit_stop;
/* vlan fields */
struct vlan_group *vlangrp;
@@ -602,6 +745,9 @@ struct fe_priv {
/* msi/msi-x fields */
u32 msi_flags;
struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
+
+ /* flow control */
+ u32 pause_flags;
};
/*
@@ -612,12 +758,14 @@ static int max_interrupt_work = 5;
/*
* Optimization can be either throughput mode or cpu mode
- *
+ *
* Throughput Mode: Every tx and rx packet will generate an interrupt.
* CPU Mode: Interrupts are controlled by a timer.
*/
-#define NV_OPTIMIZATION_MODE_THROUGHPUT 0
-#define NV_OPTIMIZATION_MODE_CPU 1
+enum {
+ NV_OPTIMIZATION_MODE_THROUGHPUT,
+ NV_OPTIMIZATION_MODE_CPU
+};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
/*
@@ -630,14 +778,31 @@ static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
static int poll_interval = -1;
/*
- * Disable MSI interrupts
+ * MSI interrupts
*/
-static int disable_msi = 0;
+enum {
+ NV_MSI_INT_DISABLED,
+ NV_MSI_INT_ENABLED
+};
+static int msi = NV_MSI_INT_ENABLED;
/*
- * Disable MSIX interrupts
+ * MSIX interrupts
*/
-static int disable_msix = 0;
+enum {
+ NV_MSIX_INT_DISABLED,
+ NV_MSIX_INT_ENABLED
+};
+static int msix = NV_MSIX_INT_ENABLED;
+
+/*
+ * DMA 64bit
+ */
+enum {
+ NV_DMA_64BIT_DISABLED,
+ NV_DMA_64BIT_ENABLED
+};
+static int dma_64bit = NV_DMA_64BIT_ENABLED;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
@@ -697,7 +862,7 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
}
if (rxtx_flags & NV_SETUP_TX_RING) {
- writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+ writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
}
} else {
if (rxtx_flags & NV_SETUP_RX_RING) {
@@ -705,12 +870,37 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
}
if (rxtx_flags & NV_SETUP_TX_RING) {
- writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
- writel((u32) (cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
+ writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+ writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
}
}
}
+static void free_rings(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ if(np->rx_ring.orig)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
+ np->rx_ring.orig, np->ring_addr);
+ } else {
+ if (np->rx_ring.ex)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
+ np->rx_ring.ex, np->ring_addr);
+ }
+ if (np->rx_skbuff)
+ kfree(np->rx_skbuff);
+ if (np->rx_dma)
+ kfree(np->rx_dma);
+ if (np->tx_skbuff)
+ kfree(np->tx_skbuff);
+ if (np->tx_dma)
+ kfree(np->tx_dma);
+ if (np->tx_dma_len)
+ kfree(np->tx_dma_len);
+}
+
static int using_multi_irqs(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
@@ -860,7 +1050,7 @@ static int phy_init(struct net_device *dev)
/* set advertise register */
reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
+ reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
@@ -873,14 +1063,14 @@ static int phy_init(struct net_device *dev)
mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
if (mii_status & PHY_GIGABIT) {
np->gigabit = PHY_GIGABIT;
- mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+ mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
mii_control_1000 &= ~ADVERTISE_1000HALF;
if (phyinterface & PHY_RGMII)
mii_control_1000 |= ADVERTISE_1000FULL;
else
mii_control_1000 &= ~ADVERTISE_1000FULL;
- if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) {
+ if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
@@ -918,6 +1108,8 @@ static int phy_init(struct net_device *dev)
return PHY_ERROR;
}
}
+ /* some phys clear out pause advertisement on reset, set it back */
+ mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
/* restart auto negotiation */
mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -1047,7 +1239,7 @@ static int nv_alloc_rx(struct net_device *dev)
while (np->cur_rx != refill_rx) {
struct sk_buff *skb;
- nr = refill_rx % RX_RING;
+ nr = refill_rx % np->rx_ring_size;
if (np->rx_skbuff[nr] == NULL) {
skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
@@ -1076,7 +1268,7 @@ static int nv_alloc_rx(struct net_device *dev)
refill_rx++;
}
np->refill_rx = refill_rx;
- if (np->cur_rx - refill_rx == RX_RING)
+ if (np->cur_rx - refill_rx == np->rx_ring_size)
return 1;
return 0;
}
@@ -1110,14 +1302,14 @@ static void nv_do_rx_refill(unsigned long data)
}
}
-static void nv_init_rx(struct net_device *dev)
+static void nv_init_rx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
int i;
- np->cur_rx = RX_RING;
+ np->cur_rx = np->rx_ring_size;
np->refill_rx = 0;
- for (i = 0; i < RX_RING; i++)
+ for (i = 0; i < np->rx_ring_size; i++)
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
np->rx_ring.orig[i].FlagLen = 0;
else
@@ -1130,7 +1322,7 @@ static void nv_init_tx(struct net_device *dev)
int i;
np->next_tx = np->nic_tx = 0;
- for (i = 0; i < TX_RING; i++) {
+ for (i = 0; i < np->tx_ring_size; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
np->tx_ring.orig[i].FlagLen = 0;
else
@@ -1174,8 +1366,8 @@ static void nv_drain_tx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
unsigned int i;
-
- for (i = 0; i < TX_RING; i++) {
+
+ for (i = 0; i < np->tx_ring_size; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
np->tx_ring.orig[i].FlagLen = 0;
else
@@ -1189,7 +1381,7 @@ static void nv_drain_rx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
int i;
- for (i = 0; i < RX_RING; i++) {
+ for (i = 0; i < np->rx_ring_size; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
np->rx_ring.orig[i].FlagLen = 0;
else
@@ -1221,8 +1413,8 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
u32 tx_flags = 0;
u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
unsigned int fragments = skb_shinfo(skb)->nr_frags;
- unsigned int nr = (np->next_tx - 1) % TX_RING;
- unsigned int start_nr = np->next_tx % TX_RING;
+ unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
+ unsigned int start_nr = np->next_tx % np->tx_ring_size;
unsigned int i;
u32 offset = 0;
u32 bcnt;
@@ -1238,7 +1430,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irq(&np->lock);
- if ((np->next_tx - np->nic_tx + entries - 1) > TX_LIMIT_STOP) {
+ if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) {
spin_unlock_irq(&np->lock);
netif_stop_queue(dev);
return NETDEV_TX_BUSY;
@@ -1247,7 +1439,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* setup the header buffer */
do {
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
- nr = (nr + 1) % TX_RING;
+ nr = (nr + 1) % np->tx_ring_size;
np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
PCI_DMA_TODEVICE);
@@ -1274,7 +1466,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
do {
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
- nr = (nr + 1) % TX_RING;
+ nr = (nr + 1) % np->tx_ring_size;
np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
PCI_DMA_TODEVICE);
@@ -1320,7 +1512,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
- }
+ }
dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
dev->name, np->next_tx, entries, tx_flags_extra);
@@ -1356,7 +1548,7 @@ static void nv_tx_done(struct net_device *dev)
struct sk_buff *skb;
while (np->nic_tx != np->next_tx) {
- i = np->nic_tx % TX_RING;
+ i = np->nic_tx % np->tx_ring_size;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
@@ -1395,13 +1587,13 @@ static void nv_tx_done(struct net_device *dev)
} else {
np->stats.tx_packets++;
np->stats.tx_bytes += skb->len;
- }
+ }
}
}
nv_release_txskb(dev, i);
np->nic_tx++;
}
- if (np->next_tx - np->nic_tx < TX_LIMIT_START)
+ if (np->next_tx - np->nic_tx < np->tx_limit_start)
netif_wake_queue(dev);
}
@@ -1438,10 +1630,10 @@ static void nv_tx_timeout(struct net_device *dev)
readl(base + i + 24), readl(base + i + 28));
}
printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
- for (i=0;i<TX_RING;i+= 4) {
+ for (i=0;i<np->tx_ring_size;i+= 4) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
- i,
+ i,
le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
le32_to_cpu(np->tx_ring.orig[i].FlagLen),
le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
@@ -1452,7 +1644,7 @@ static void nv_tx_timeout(struct net_device *dev)
le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
} else {
printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
- i,
+ i,
le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
le32_to_cpu(np->tx_ring.ex[i].FlagLen),
@@ -1550,15 +1742,14 @@ static void nv_rx_process(struct net_device *dev)
u32 Flags;
u32 vlanflags = 0;
-
for (;;) {
struct sk_buff *skb;
int len;
int i;
- if (np->cur_rx - np->refill_rx >= RX_RING)
+ if (np->cur_rx - np->refill_rx >= np->rx_ring_size)
break; /* we scanned the whole ring - do not continue */
- i = np->cur_rx % RX_RING;
+ i = np->cur_rx % np->rx_ring_size;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
@@ -1665,14 +1856,16 @@ static void nv_rx_process(struct net_device *dev)
}
}
}
- Flags &= NV_RX2_CHECKSUMMASK;
- if (Flags == NV_RX2_CHECKSUMOK1 ||
- Flags == NV_RX2_CHECKSUMOK2 ||
- Flags == NV_RX2_CHECKSUMOK3) {
- dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
- np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
+ if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
+ Flags &= NV_RX2_CHECKSUMMASK;
+ if (Flags == NV_RX2_CHECKSUMOK1 ||
+ Flags == NV_RX2_CHECKSUMOK2 ||
+ Flags == NV_RX2_CHECKSUMOK3) {
+ dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
+ np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
+ }
}
}
/* got a valid packet - forward it to the network core */
@@ -1747,18 +1940,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
nv_drain_rx(dev);
nv_drain_tx(dev);
/* reinit driver view of the rx queue */
- nv_init_rx(dev);
- nv_init_tx(dev);
- /* alloc new rx buffers */
set_bufsize(dev);
- if (nv_alloc_rx(dev)) {
+ if (nv_init_ring(dev)) {
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
}
/* reinit nic view of the rx queue */
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
- writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
+ writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -1832,16 +2022,16 @@ static void nv_set_multicast(struct net_device *dev)
u8 __iomem *base = get_hwbase(dev);
u32 addr[2];
u32 mask[2];
- u32 pff;
+ u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
memset(addr, 0, sizeof(addr));
memset(mask, 0, sizeof(mask));
if (dev->flags & IFF_PROMISC) {
printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
- pff = NVREG_PFF_PROMISC;
+ pff |= NVREG_PFF_PROMISC;
} else {
- pff = NVREG_PFF_MYADDR;
+ pff |= NVREG_PFF_MYADDR;
if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
u32 alwaysOff[2];
@@ -1886,6 +2076,35 @@ static void nv_set_multicast(struct net_device *dev)
spin_unlock_irq(&np->lock);
}
+void nv_update_pause(struct net_device *dev, u32 pause_flags)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
+
+ if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
+ u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
+ if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
+ writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
+ np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ } else {
+ writel(pff, base + NvRegPacketFilterFlags);
+ }
+ }
+ if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
+ u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
+ if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
+ writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
+ writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
+ np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ } else {
+ writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
+ writel(regmisc, base + NvRegMisc1);
+ }
+ }
+}
+
/**
* nv_update_linkspeed: Setup the MAC according to the link partner
* @dev: Network device to be configured
@@ -1901,12 +2120,14 @@ static int nv_update_linkspeed(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- int adv, lpa;
+ int adv = 0;
+ int lpa = 0;
+ int adv_lpa, adv_pause, lpa_pause;
int newls = np->linkspeed;
int newdup = np->duplex;
int mii_status;
int retval = 0;
- u32 control_1000, status_1000, phyreg;
+ u32 control_1000, status_1000, phyreg, pause_flags;
/* BMSR_LSTATUS is latched, read it twice:
* we want the current value.
@@ -1952,10 +2173,15 @@ static int nv_update_linkspeed(struct net_device *dev)
goto set_speed;
}
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
+ dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
+ dev->name, adv, lpa);
+
retval = 1;
if (np->gigabit == PHY_GIGABIT) {
- control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
- status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ);
+ control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+ status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
if ((control_1000 & ADVERTISE_1000FULL) &&
(status_1000 & LPA_1000FULL)) {
@@ -1967,27 +2193,22 @@ static int nv_update_linkspeed(struct net_device *dev)
}
}
- adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
- dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
- dev->name, adv, lpa);
-
/* FIXME: handle parallel detection properly */
- lpa = lpa & adv;
- if (lpa & LPA_100FULL) {
+ adv_lpa = lpa & adv;
+ if (adv_lpa & LPA_100FULL) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
newdup = 1;
- } else if (lpa & LPA_100HALF) {
+ } else if (adv_lpa & LPA_100HALF) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
newdup = 0;
- } else if (lpa & LPA_10FULL) {
+ } else if (adv_lpa & LPA_10FULL) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 1;
- } else if (lpa & LPA_10HALF) {
+ } else if (adv_lpa & LPA_10HALF) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
} else {
- dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa);
+ dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
}
@@ -2030,6 +2251,46 @@ set_speed:
writel(np->linkspeed, base + NvRegLinkSpeed);
pci_push(base);
+ pause_flags = 0;
+ /* setup pause frame */
+ if (np->duplex != 0) {
+ if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
+ adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
+ lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
+
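+ /* resolve rx/tx pause from the local and partner pause/asym-pause
+ * advertisement bits (symmetric vs. asymmetric flow control) */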
+ switch (adv_pause) {
+ case (ADVERTISE_PAUSE_CAP):
+ if (lpa_pause & LPA_PAUSE_CAP) {
+ pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+ pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ }
+ break;
+ case (ADVERTISE_PAUSE_ASYM):
+ if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
+ {
+ pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ }
+ break;
+ case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
+ if (lpa_pause & LPA_PAUSE_CAP)
+ {
+ pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+ pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ }
+ if (lpa_pause == LPA_PAUSE_ASYM)
+ {
+ pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ }
+ break;
+ }
+ } else {
+ pause_flags = np->pause_flags;
+ }
+ }
+ nv_update_pause(dev, pause_flags);
+
return retval;
}
@@ -2090,7 +2351,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
spin_lock(&np->lock);
nv_tx_done(dev);
spin_unlock(&np->lock);
-
+
nv_rx_process(dev);
if (nv_alloc_rx(dev)) {
spin_lock(&np->lock);
@@ -2098,7 +2359,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
spin_unlock(&np->lock);
}
-
+
if (events & NVREG_IRQ_LINK) {
spin_lock(&np->lock);
nv_link_irq(dev);
@@ -2163,7 +2424,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
spin_lock_irq(&np->lock);
nv_tx_done(dev);
spin_unlock_irq(&np->lock);
-
+
if (events & (NVREG_IRQ_TX_ERR)) {
dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
dev->name, events);
@@ -2206,7 +2467,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
if (!(events & np->irqmask))
break;
-
+
nv_rx_process(dev);
if (nv_alloc_rx(dev)) {
spin_lock_irq(&np->lock);
@@ -2214,7 +2475,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
spin_unlock_irq(&np->lock);
}
-
+
if (i > max_interrupt_work) {
spin_lock_irq(&np->lock);
/* disable interrupts on the nic */
@@ -2253,7 +2514,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
if (!(events & np->irqmask))
break;
-
+
if (events & NVREG_IRQ_LINK) {
spin_lock_irq(&np->lock);
nv_link_irq(dev);
@@ -2290,6 +2551,175 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
return IRQ_RETVAL(i);
}
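+/* Interrupt handler used only by the ethtool interrupt self-test: it simply
+ * acknowledges the timer irq and records in np->intr_test that it fired.
+ */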
+static irqreturn_t nv_nic_irq_test(int foo, void *data, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 events;
+
+ dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
+
+ if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+ events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
+ writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
+ } else {
+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+ writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
+ }
+ pci_push(base);
+ dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
+ if (!(events & NVREG_IRQ_TIMER))
+ return IRQ_RETVAL(0);
+
+ spin_lock(&np->lock);
+ np->intr_test = 1;
+ spin_unlock(&np->lock);
+
+ dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
+
+ return IRQ_RETVAL(1);
+}
+
+static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ int i;
+ u32 msixmap = 0;
+
+ /* Each interrupt bit can be mapped to a MSIX vector (4 bits).
+ * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
+ * the remaining 8 interrupts.
+ */
+ for (i = 0; i < 8; i++) {
+ if ((irqmask >> i) & 0x1) {
+ msixmap |= vector << (i << 2);
+ }
+ }
+ writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
+
+ msixmap = 0;
+ for (i = 0; i < 8; i++) {
+ if ((irqmask >> (i + 8)) & 0x1) {
+ msixmap |= vector << (i << 2);
+ }
+ }
+ writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
+}
+
+static int nv_request_irq(struct net_device *dev, int intr_test)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int ret = 1;
+ int i;
+
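+ /* try MSI-X first (separate rx/tx/other vectors in throughput mode,
+ * one shared vector otherwise), then MSI, then the legacy pin irq */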
+ if (np->msi_flags & NV_MSI_X_CAPABLE) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+ np->msi_x_entry[i].entry = i;
+ }
+ if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
+ np->msi_flags |= NV_MSI_X_ENABLED;
+ if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
+ /* Request irq for rx handling */
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
+ printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_err;
+ }
+ /* Request irq for tx handling */
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
+ printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_free_rx;
+ }
+ /* Request irq for link and timer handling */
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
+ printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_free_tx;
+ }
+ /* map interrupts to their respective vector */
+ writel(0, base + NvRegMSIXMap0);
+ writel(0, base + NvRegMSIXMap1);
+ set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
+ set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
+ set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
+ } else {
+ /* Request irq for all interrupts */
+ if ((!intr_test &&
+ request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
+ (intr_test &&
+ request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
+ printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_err;
+ }
+
+ /* map interrupts to vector 0 */
+ writel(0, base + NvRegMSIXMap0);
+ writel(0, base + NvRegMSIXMap1);
+ }
+ }
+ }
+ if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+ if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+ np->msi_flags |= NV_MSI_ENABLED;
+ if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
+ (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
+ printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_ENABLED;
+ goto out_err;
+ }
+
+ /* map interrupts to vector 0 */
+ writel(0, base + NvRegMSIMap0);
+ writel(0, base + NvRegMSIMap1);
+ /* enable msi vector 0 */
+ writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+ }
+ }
+ if (ret != 0) {
+ if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
+ (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0))
+ goto out_err;
+
+ }
+
+ return 0;
+out_free_tx:
+ free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
+out_free_rx:
+ free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
+out_err:
+ return 1;
+}
+
+static void nv_free_irq(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ int i;
+
+ if (np->msi_flags & NV_MSI_X_ENABLED) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+ free_irq(np->msi_x_entry[i].vector, dev);
+ }
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ } else {
+ free_irq(np->pci_dev->irq, dev);
+ if (np->msi_flags & NV_MSI_ENABLED) {
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_ENABLED;
+ }
+ }
+}
+
static void nv_do_nic_poll(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
@@ -2326,7 +2756,7 @@ static void nv_do_nic_poll(unsigned long data)
np->nic_poll_irq = 0;
/* FIXME: Do we need synchronize_irq(dev->irq) here? */
-
+
writel(mask, base + NvRegIrqMask);
pci_push(base);
@@ -2359,6 +2789,56 @@ static void nv_poll_controller(struct net_device *dev)
}
#endif
+static void nv_do_stats_poll(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
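+ /* accumulate the hardware counters into the driver's software copy */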
+ np->estats.tx_bytes += readl(base + NvRegTxCnt);
+ np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
+ np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
+ np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
+ np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
+ np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
+ np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
+ np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
+ np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
+ np->estats.tx_deferral += readl(base + NvRegTxDef);
+ np->estats.tx_packets += readl(base + NvRegTxFrame);
+ np->estats.tx_pause += readl(base + NvRegTxPause);
+ np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
+ np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
+ np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
+ np->estats.rx_runt += readl(base + NvRegRxRunt);
+ np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
+ np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
+ np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
+ np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
+ np->estats.rx_length_error += readl(base + NvRegRxLenErr);
+ np->estats.rx_unicast += readl(base + NvRegRxUnicast);
+ np->estats.rx_multicast += readl(base + NvRegRxMulticast);
+ np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
+ np->estats.rx_bytes += readl(base + NvRegRxCnt);
+ np->estats.rx_pause += readl(base + NvRegRxPause);
+ np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
+ np->estats.rx_packets =
+ np->estats.rx_unicast +
+ np->estats.rx_multicast +
+ np->estats.rx_broadcast;
+ np->estats.rx_errors_total =
+ np->estats.rx_crc_errors +
+ np->estats.rx_over_errors +
+ np->estats.rx_frame_error +
+ (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
+ np->estats.rx_late_collision +
+ np->estats.rx_runt +
+ np->estats.rx_frame_too_long;
+
+ if (!np->in_shutdown)
+ mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
+}
+
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct fe_priv *np = netdev_priv(dev);
@@ -2382,17 +2862,19 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
+ u32 flags = 0;
- spin_lock_irq(&np->lock);
if (wolinfo->wolopts == 0) {
- writel(0, base + NvRegWakeUpFlags);
np->wolenabled = 0;
- }
- if (wolinfo->wolopts & WAKE_MAGIC) {
- writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
+ } else if (wolinfo->wolopts & WAKE_MAGIC) {
np->wolenabled = 1;
+ flags = NVREG_WAKEUPFLAGS_ENABLE;
+ }
+ if (netif_running(dev)) {
+ spin_lock_irq(&np->lock);
+ writel(flags, base + NvRegWakeUpFlags);
+ spin_unlock_irq(&np->lock);
}
- spin_unlock_irq(&np->lock);
return 0;
}
@@ -2406,9 +2888,17 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (!netif_running(dev)) {
/* We do not track link speed / duplex setting if the
* interface is disabled. Force a link check */
- nv_update_linkspeed(dev);
+ if (nv_update_linkspeed(dev)) {
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ } else {
+ if (netif_carrier_ok(dev))
+ netif_carrier_off(dev);
+ }
}
- switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
+
+ if (netif_carrier_ok(dev)) {
+ switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
case NVREG_LINKSPEED_10:
ecmd->speed = SPEED_10;
break;
@@ -2418,10 +2908,14 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
case NVREG_LINKSPEED_1000:
ecmd->speed = SPEED_1000;
break;
+ }
+ ecmd->duplex = DUPLEX_HALF;
+ if (np->duplex)
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
}
- ecmd->duplex = DUPLEX_HALF;
- if (np->duplex)
- ecmd->duplex = DUPLEX_FULL;
ecmd->autoneg = np->autoneg;
@@ -2429,23 +2923,20 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (np->autoneg) {
ecmd->advertising |= ADVERTISED_Autoneg;
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- } else {
- adv = np->fixed_mode;
- }
- if (adv & ADVERTISE_10HALF)
- ecmd->advertising |= ADVERTISED_10baseT_Half;
- if (adv & ADVERTISE_10FULL)
- ecmd->advertising |= ADVERTISED_10baseT_Full;
- if (adv & ADVERTISE_100HALF)
- ecmd->advertising |= ADVERTISED_100baseT_Half;
- if (adv & ADVERTISE_100FULL)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- if (np->autoneg && np->gigabit == PHY_GIGABIT) {
- adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
- if (adv & ADVERTISE_1000FULL)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (adv & ADVERTISE_10HALF)
+ ecmd->advertising |= ADVERTISED_10baseT_Half;
+ if (adv & ADVERTISE_10FULL)
+ ecmd->advertising |= ADVERTISED_10baseT_Full;
+ if (adv & ADVERTISE_100HALF)
+ ecmd->advertising |= ADVERTISED_100baseT_Half;
+ if (adv & ADVERTISE_100FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ if (np->gigabit == PHY_GIGABIT) {
+ adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+ if (adv & ADVERTISE_1000FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ }
}
-
ecmd->supported = (SUPPORTED_Autoneg |
SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
@@ -2497,7 +2988,18 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return -EINVAL;
}
- spin_lock_irq(&np->lock);
+ netif_carrier_off(dev);
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
+ spin_lock_bh(&dev->xmit_lock);
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ spin_unlock(&np->lock);
+ spin_unlock_bh(&dev->xmit_lock);
+ }
+
if (ecmd->autoneg == AUTONEG_ENABLE) {
int adv, bmcr;
@@ -2505,7 +3007,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
/* advertise only what has been requested */
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
if (ecmd->advertising & ADVERTISED_10baseT_Half)
adv |= ADVERTISE_10HALF;
if (ecmd->advertising & ADVERTISED_10baseT_Full)
@@ -2514,16 +3016,22 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
adv |= ADVERTISE_100HALF;
if (ecmd->advertising & ADVERTISED_100baseT_Full)
adv |= ADVERTISE_100FULL;
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
+ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+ adv |= ADVERTISE_PAUSE_ASYM;
mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
if (np->gigabit == PHY_GIGABIT) {
- adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+ adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
adv &= ~ADVERTISE_1000FULL;
if (ecmd->advertising & ADVERTISED_1000baseT_Full)
adv |= ADVERTISE_1000FULL;
- mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
+ mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
}
+ if (netif_running(dev))
+ printk(KERN_INFO "%s: link down.\n", dev->name);
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
@@ -2534,7 +3042,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
np->autoneg = 0;
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
adv |= ADVERTISE_10HALF;
if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
@@ -2543,30 +3051,49 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
adv |= ADVERTISE_100HALF;
if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
adv |= ADVERTISE_100FULL;
+ np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
+ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ }
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
+ adv |= ADVERTISE_PAUSE_ASYM;
+ np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ }
mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
np->fixed_mode = adv;
if (np->gigabit == PHY_GIGABIT) {
- adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+ adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
adv &= ~ADVERTISE_1000FULL;
- mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
+ mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
}
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
- bmcr |= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
- if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
+ bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
+ if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
bmcr |= BMCR_FULLDPLX;
- if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
+ if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
bmcr |= BMCR_SPEED100;
mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
-
- if (netif_running(dev)) {
+ if (np->phy_oui == PHY_OUI_MARVELL) {
+ /* reset the phy */
+ if (phy_reset(dev)) {
+ printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+ return -EINVAL;
+ }
+ } else if (netif_running(dev)) {
/* Wait a bit and then reconfigure the nic. */
udelay(10);
nv_linkchange(dev);
}
}
- spin_unlock_irq(&np->lock);
+
+ if (netif_running(dev)) {
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ nv_enable_irq(dev);
+ }
return 0;
}
@@ -2598,196 +3125,742 @@ static int nv_nway_reset(struct net_device *dev)
struct fe_priv *np = netdev_priv(dev);
int ret;
- spin_lock_irq(&np->lock);
if (np->autoneg) {
int bmcr;
+ netif_carrier_off(dev);
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
+ spin_lock_bh(&dev->xmit_lock);
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ spin_unlock(&np->lock);
+ spin_unlock_bh(&dev->xmit_lock);
+ printk(KERN_INFO "%s: link down.\n", dev->name);
+ }
+
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ if (netif_running(dev)) {
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ nv_enable_irq(dev);
+ }
ret = 0;
} else {
ret = -EINVAL;
}
- spin_unlock_irq(&np->lock);
return ret;
}
-static struct ethtool_ops ops = {
- .get_drvinfo = nv_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_wol = nv_get_wol,
- .set_wol = nv_set_wol,
- .get_settings = nv_get_settings,
- .set_settings = nv_set_settings,
- .get_regs_len = nv_get_regs_len,
- .get_regs = nv_get_regs,
- .nway_reset = nv_nway_reset,
- .get_perm_addr = ethtool_op_get_perm_addr,
-};
+static int nv_set_tso(struct net_device *dev, u32 value)
+{
+ struct fe_priv *np = netdev_priv(dev);
-static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+ if ((np->driver_data & DEV_HAS_CHECKSUM))
+ return ethtool_op_set_tso(dev, value);
+ else
+ return -EOPNOTSUPP;
+}
+
+static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
- spin_lock_irq(&np->lock);
+ ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
- /* save vlan group */
- np->vlangrp = grp;
+ ring->rx_pending = np->rx_ring_size;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+ ring->tx_pending = np->tx_ring_size;
+}
- if (grp) {
- /* enable vlan on MAC */
- np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
+static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len;
+ dma_addr_t ring_addr;
+
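+ /* validate the requested ring sizes against the limits of the descriptor format */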
+ if (ring->rx_pending < RX_RING_MIN ||
+ ring->tx_pending < TX_RING_MIN ||
+ ring->rx_mini_pending != 0 ||
+ ring->rx_jumbo_pending != 0 ||
+ (np->desc_ver == DESC_VER_1 &&
+ (ring->rx_pending > RING_MAX_DESC_VER_1 ||
+ ring->tx_pending > RING_MAX_DESC_VER_1)) ||
+ (np->desc_ver != DESC_VER_1 &&
+ (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
+ ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
+ return -EINVAL;
+ }
+
+ /* allocate new rings */
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ rxtx_ring = pci_alloc_consistent(np->pci_dev,
+ sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
+ &ring_addr);
} else {
- /* disable vlan on MAC */
- np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
- np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
+ rxtx_ring = pci_alloc_consistent(np->pci_dev,
+ sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
+ &ring_addr);
+ }
+ rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL);
+ rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL);
+ tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL);
+ tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL);
+ tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL);
+ if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
+ /* fall back to old rings */
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ if(rxtx_ring)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
+ rxtx_ring, ring_addr);
+ } else {
+ if (rxtx_ring)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
+ rxtx_ring, ring_addr);
+ }
+ if (rx_skbuff)
+ kfree(rx_skbuff);
+ if (rx_dma)
+ kfree(rx_dma);
+ if (tx_skbuff)
+ kfree(tx_skbuff);
+ if (tx_dma)
+ kfree(tx_dma);
+ if (tx_dma_len)
+ kfree(tx_dma_len);
+ goto exit;
}
- writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
+ spin_lock_bh(&dev->xmit_lock);
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ nv_txrx_reset(dev);
+ /* drain queues */
+ nv_drain_rx(dev);
+ nv_drain_tx(dev);
+ /* delete queues */
+ free_rings(dev);
+ }
- spin_unlock_irq(&np->lock);
-};
+ /* set new values */
+ np->rx_ring_size = ring->rx_pending;
+ np->tx_ring_size = ring->tx_pending;
+ np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE;
+ np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1;
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
+ np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
+ } else {
+ np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
+ np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
+ }
+ np->rx_skbuff = (struct sk_buff**)rx_skbuff;
+ np->rx_dma = (dma_addr_t*)rx_dma;
+ np->tx_skbuff = (struct sk_buff**)tx_skbuff;
+ np->tx_dma = (dma_addr_t*)tx_dma;
+ np->tx_dma_len = (unsigned int*)tx_dma_len;
+ np->ring_addr = ring_addr;
+
+ memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
+ memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
+ memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
+ memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
+ memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
-static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+ if (netif_running(dev)) {
+ /* reinit driver view of the queues */
+ set_bufsize(dev);
+ if (nv_init_ring(dev)) {
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ }
+
+ /* reinit nic view of the queues */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+ writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+ pci_push(base);
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ pci_push(base);
+
+ /* restart engines */
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ spin_unlock(&np->lock);
+ spin_unlock_bh(&dev->xmit_lock);
+ nv_enable_irq(dev);
+ }
+ return 0;
+exit:
+ return -ENOMEM;
+}
+
+static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
- /* nothing to do */
-};
+ struct fe_priv *np = netdev_priv(dev);
-static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
+ pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
+ pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
+ pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
+}
+
+static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ int adv, bmcr;
+
+ if ((!np->autoneg && np->duplex == 0) ||
+ (np->autoneg && !pause->autoneg && np->duplex == 0)) {
+ printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
+ dev->name);
+ return -EINVAL;
+ }
+ if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
+ printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
+ return -EINVAL;
+ }
+
+ netif_carrier_off(dev);
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
+ spin_lock_bh(&dev->xmit_lock);
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ spin_unlock(&np->lock);
+ spin_unlock_bh(&dev->xmit_lock);
+ }
+
+ np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
+ if (pause->rx_pause)
+ np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
+ if (pause->tx_pause)
+ np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
+
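+ /* with autoneg, pause settings are renegotiated with the link partner;
+ * otherwise the requested settings are applied to the hardware directly */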
+ if (np->autoneg && pause->autoneg) {
+ np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
+
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
+ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+ adv |= ADVERTISE_PAUSE_ASYM;
+ mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
+
+ if (netif_running(dev))
+ printk(KERN_INFO "%s: link down.\n", dev->name);
+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ } else {
+ np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
+ if (pause->rx_pause)
+ np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ if (pause->tx_pause)
+ np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+
+ if (!netif_running(dev))
+ nv_update_linkspeed(dev);
+ else
+ nv_update_pause(dev, np->pause_flags);
+ }
+
+ if (netif_running(dev)) {
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ nv_enable_irq(dev);
+ }
+ return 0;
+}
+
+static u32 nv_get_rx_csum(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0;
+}
+
+static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
+ struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- int i;
- u32 msixmap = 0;
+ int retcode = 0;
- /* Each interrupt bit can be mapped to a MSIX vector (4 bits).
- * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
- * the remaining 8 interrupts.
- */
- for (i = 0; i < 8; i++) {
- if ((irqmask >> i) & 0x1) {
- msixmap |= vector << (i << 2);
+ if (np->driver_data & DEV_HAS_CHECKSUM) {
+
+ if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) ||
+ (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) {
+ /* already set or unset */
+ return 0;
}
- }
- writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
- msixmap = 0;
- for (i = 0; i < 8; i++) {
- if ((irqmask >> (i + 8)) & 0x1) {
- msixmap |= vector << (i << 2);
+ if (data) {
+ np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
+ } else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) {
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
+ } else {
+ printk(KERN_INFO "Can not disable rx checksum if vlan is enabled\n");
+ return -EINVAL;
+ }
+
+ if (netif_running(dev)) {
+ spin_lock_irq(&np->lock);
+ writel(np->txrxctl_bits, base + NvRegTxRxControl);
+ spin_unlock_irq(&np->lock);
}
+ } else {
+ return -EINVAL;
}
- writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
+
+ return retcode;
}
-static int nv_request_irq(struct net_device *dev)
+static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
+
+ if (np->driver_data & DEV_HAS_CHECKSUM)
+ return ethtool_op_set_tx_hw_csum(dev, data);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int nv_set_sg(struct net_device *dev, u32 data)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ if (np->driver_data & DEV_HAS_CHECKSUM)
+ return ethtool_op_set_sg(dev, data);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int nv_get_stats_count(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ if (np->driver_data & DEV_HAS_STATISTICS)
+ return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
+ else
+ return 0;
+}
+
+static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ /* update stats */
+ nv_do_stats_poll((unsigned long)dev);
+
+ memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64));
+}
+
+static int nv_self_test_count(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ if (np->driver_data & DEV_HAS_TEST_EXTENDED)
+ return NV_TEST_COUNT_EXTENDED;
+ else
+ return NV_TEST_COUNT_BASE;
+}
+
+static int nv_link_test(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ int mii_status;
+
+ mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+
+ /* check phy link status */
+ if (!(mii_status & BMSR_LSTATUS))
+ return 0;
+ else
+ return 1;
+}
+
+static int nv_register_test(struct net_device *dev)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ int i = 0;
+ u32 orig_read, new_read;
+
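+ /* walk the nv_registers_test table: toggle the maskable bits of each
+ * register, verify the new value reads back, then restore the original */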
+ do {
+ orig_read = readl(base + nv_registers_test[i].reg);
+
+ /* xor with mask to toggle bits */
+ orig_read ^= nv_registers_test[i].mask;
+
+ writel(orig_read, base + nv_registers_test[i].reg);
+
+ new_read = readl(base + nv_registers_test[i].reg);
+
+ if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
+ return 0;
+
+ /* restore original value */
+ orig_read ^= nv_registers_test[i].mask;
+ writel(orig_read, base + nv_registers_test[i].reg);
+
+ } while (nv_registers_test[++i].reg != 0);
+
+ return 1;
+}
+
+static int nv_interrupt_test(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int ret = 1;
- int i;
+ int testcnt;
+ u32 save_msi_flags, save_poll_interval = 0;
- if (np->msi_flags & NV_MSI_X_CAPABLE) {
- for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
- np->msi_x_entry[i].entry = i;
+ if (netif_running(dev)) {
+ /* free current irq */
+ nv_free_irq(dev);
+ save_poll_interval = readl(base+NvRegPollingInterval);
+ }
+
+ /* flag to test interrupt handler */
+ np->intr_test = 0;
+
+ /* setup test irq */
+ save_msi_flags = np->msi_flags;
+ np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
+ np->msi_flags |= 0x001; /* setup 1 vector */
+ if (nv_request_irq(dev, 1))
+ return 0;
+
+ /* setup timer interrupt */
+ writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
+ writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
+
+ nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
+
+ /* wait for at least one interrupt */
+ msleep(100);
+
+ spin_lock_irq(&np->lock);
+
+ /* flag should be set within ISR */
+ testcnt = np->intr_test;
+ if (!testcnt)
+ ret = 2;
+
+ nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
+ if (!(np->msi_flags & NV_MSI_X_ENABLED))
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ else
+ writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+
+ spin_unlock_irq(&np->lock);
+
+ nv_free_irq(dev);
+
+ np->msi_flags = save_msi_flags;
+
+ if (netif_running(dev)) {
+ writel(save_poll_interval, base + NvRegPollingInterval);
+ writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
+ /* restore original irq */
+ if (nv_request_irq(dev, 0))
+ return 0;
+ }
+
+ return ret;
+}
+
+static int nv_loopback_test(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ struct sk_buff *tx_skb, *rx_skb;
+ dma_addr_t test_dma_addr;
+ u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
+ u32 Flags;
+ int len, i, pkt_len;
+ u8 *pkt_data;
+ u32 filter_flags = 0;
+ u32 misc1_flags = 0;
+ int ret = 1;
+
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
+ filter_flags = readl(base + NvRegPacketFilterFlags);
+ misc1_flags = readl(base + NvRegMisc1);
+ } else {
+ nv_txrx_reset(dev);
+ }
+
+ /* reinit driver view of the rx queue */
+ set_bufsize(dev);
+ nv_init_ring(dev);
+
+ /* setup hardware for loopback */
+ writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
+ writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
+
+ /* reinit nic view of the rx queue */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+ writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+ pci_push(base);
+
+ /* restart rx engine */
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+
+ /* setup packet for tx */
+ pkt_len = ETH_DATA_LEN;
+ tx_skb = dev_alloc_skb(pkt_len);
+ pkt_data = skb_put(tx_skb, pkt_len);
+ for (i = 0; i < pkt_len; i++)
+ pkt_data[i] = (u8)(i & 0xff);
+ test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
+ tx_skb->end-tx_skb->data, PCI_DMA_TODEVICE);
+
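+ /* hand the test packet straight to the first tx descriptor, bypassing the stack */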
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
+ np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+ } else {
+ np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32;
+ np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
+ np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+ }
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ pci_push(get_hwbase(dev));
+
+ msleep(500);
+
+ /* check for rx of the packet */
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen);
+ len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
+
+ } else {
+ Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen);
+ len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
+ }
+
+ if (Flags & NV_RX_AVAIL) {
+ ret = 0;
+ } else if (np->desc_ver == DESC_VER_1) {
+ if (Flags & NV_RX_ERROR)
+ ret = 0;
+ } else {
+ if (Flags & NV_RX2_ERROR) {
+ ret = 0;
}
- if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
- np->msi_flags |= NV_MSI_X_ENABLED;
- if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
- /* Request irq for rx handling */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- goto out_err;
- }
- /* Request irq for tx handling */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- goto out_free_rx;
- }
- /* Request irq for link and timer handling */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- goto out_free_tx;
- }
- /* map interrupts to their respective vector */
- writel(0, base + NvRegMSIXMap0);
- writel(0, base + NvRegMSIXMap1);
- set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
- set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
- set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
- } else {
- /* Request irq for all interrupts */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- goto out_err;
- }
+ }
- /* map interrupts to vector 0 */
- writel(0, base + NvRegMSIXMap0);
- writel(0, base + NvRegMSIXMap1);
+ if (ret) {
+ if (len != pkt_len) {
+ ret = 0;
+ dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
+ dev->name, len, pkt_len);
+ } else {
+ rx_skb = np->rx_skbuff[0];
+ for (i = 0; i < pkt_len; i++) {
+ if (rx_skb->data[i] != (u8)(i & 0xff)) {
+ ret = 0;
+ dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
+ dev->name, i);
+ break;
+ }
}
}
+ } else {
+ dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
}
- if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
- if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
- np->msi_flags |= NV_MSI_ENABLED;
- if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
- pci_disable_msi(np->pci_dev);
- np->msi_flags &= ~NV_MSI_ENABLED;
- goto out_err;
+
+ pci_unmap_single(np->pci_dev, test_dma_addr,
+ tx_skb->end-tx_skb->data,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(tx_skb);
+
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ nv_txrx_reset(dev);
+ /* drain rx queue */
+ nv_drain_rx(dev);
+ nv_drain_tx(dev);
+
+ if (netif_running(dev)) {
+ writel(misc1_flags, base + NvRegMisc1);
+ writel(filter_flags, base + NvRegPacketFilterFlags);
+ nv_enable_irq(dev);
+ }
+
+ return ret;
+}
+
+static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int result;
+ memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));
+
+ if (!nv_link_test(dev)) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[0] = 1;
+ }
+
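+ /* offline tests need the interface quiesced: stop the queues and engines,
+ * run the register/interrupt/loopback tests, then restore the rx/tx path */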
+ if (test->flags & ETH_TEST_FL_OFFLINE) {
+ if (netif_running(dev)) {
+ netif_stop_queue(dev);
+ spin_lock_bh(&dev->xmit_lock);
+ spin_lock_irq(&np->lock);
+ nv_disable_hw_interrupts(dev, np->irqmask);
+ if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ } else {
+ writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
}
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ nv_txrx_reset(dev);
+ /* drain rx queue */
+ nv_drain_rx(dev);
+ nv_drain_tx(dev);
+ spin_unlock_irq(&np->lock);
+ spin_unlock_bh(&dev->xmit_lock);
+ }
- /* map interrupts to vector 0 */
- writel(0, base + NvRegMSIMap0);
- writel(0, base + NvRegMSIMap1);
- /* enable msi vector 0 */
- writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+ if (!nv_register_test(dev)) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[1] = 1;
+ }
+
+ result = nv_interrupt_test(dev);
+ if (result != 1) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[2] = 1;
+ }
+ if (result == 0) {
+ /* bail out */
+ return;
+ }
+
+ if (!nv_loopback_test(dev)) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[3] = 1;
+ }
+
+ if (netif_running(dev)) {
+ /* reinit driver view of the rx queue */
+ set_bufsize(dev);
+ if (nv_init_ring(dev)) {
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ }
+ /* reinit nic view of the rx queue */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+ writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+ pci_push(base);
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ pci_push(base);
+ /* restart rx engine */
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ netif_start_queue(dev);
+ nv_enable_hw_interrupts(dev, np->irqmask);
}
}
- if (ret != 0) {
- if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
- goto out_err;
- }
+}
- return 0;
-out_free_tx:
- free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
-out_free_rx:
- free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
-out_err:
- return 1;
+static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
+ break;
+ case ETH_SS_TEST:
+ memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
+ break;
+ }
}
-static void nv_free_irq(struct net_device *dev)
+static struct ethtool_ops ops = {
+ .get_drvinfo = nv_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_wol = nv_get_wol,
+ .set_wol = nv_set_wol,
+ .get_settings = nv_get_settings,
+ .set_settings = nv_set_settings,
+ .get_regs_len = nv_get_regs_len,
+ .get_regs = nv_get_regs,
+ .nway_reset = nv_nway_reset,
+ .get_perm_addr = ethtool_op_get_perm_addr,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = nv_set_tso,
+ .get_ringparam = nv_get_ringparam,
+ .set_ringparam = nv_set_ringparam,
+ .get_pauseparam = nv_get_pauseparam,
+ .set_pauseparam = nv_set_pauseparam,
+ .get_rx_csum = nv_get_rx_csum,
+ .set_rx_csum = nv_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = nv_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = nv_set_sg,
+ .get_strings = nv_get_strings,
+ .get_stats_count = nv_get_stats_count,
+ .get_ethtool_stats = nv_get_ethtool_stats,
+ .self_test_count = nv_self_test_count,
+ .self_test = nv_self_test,
+};
+
+static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
struct fe_priv *np = get_nvpriv(dev);
- int i;
- if (np->msi_flags & NV_MSI_X_ENABLED) {
- for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
- free_irq(np->msi_x_entry[i].vector, dev);
- }
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
+ spin_lock_irq(&np->lock);
+
+ /* save vlan group */
+ np->vlangrp = grp;
+
+ if (grp) {
+ /* enable vlan on MAC */
+ np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
} else {
- free_irq(np->pci_dev->irq, dev);
- if (np->msi_flags & NV_MSI_ENABLED) {
- pci_disable_msi(np->pci_dev);
- np->msi_flags &= ~NV_MSI_ENABLED;
- }
+ /* disable vlan on MAC */
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
}
-}
+
+ writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+
+ spin_unlock_irq(&np->lock);
+};
+
+static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ /* nothing to do */
+};
static int nv_open(struct net_device *dev)
{
@@ -2813,6 +3886,9 @@ static int nv_open(struct net_device *dev)
writel(0, base + NvRegAdapterControl);
+ if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
+ writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
+
/* 2) initialize descriptor rings */
set_bufsize(dev);
oom = nv_init_ring(dev);
@@ -2829,7 +3905,7 @@ static int nv_open(struct net_device *dev)
/* 4) give hw rings */
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
- writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
+ writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
/* 5) continue setup */
@@ -2871,7 +3947,8 @@ static int nv_open(struct net_device *dev)
base + NvRegAdapterControl);
writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
- writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags);
+ if (np->wolenabled)
+ writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
i = readl(base + NvRegPowerState);
if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
@@ -2887,7 +3964,7 @@ static int nv_open(struct net_device *dev)
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
pci_push(base);
- if (nv_request_irq(dev)) {
+ if (nv_request_irq(dev, 0)) {
goto out_drain;
}
@@ -2924,6 +4001,11 @@ static int nv_open(struct net_device *dev)
}
if (oom)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+
+ /* start statistics timer */
+ if (np->driver_data & DEV_HAS_STATISTICS)
+ mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
+
spin_unlock_irq(&np->lock);
return 0;
@@ -2944,6 +4026,7 @@ static int nv_close(struct net_device *dev)
del_timer_sync(&np->oom_kick);
del_timer_sync(&np->nic_poll);
+ del_timer_sync(&np->stats_poll);
netif_stop_queue(dev);
spin_lock_irq(&np->lock);
@@ -3003,6 +4086,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
init_timer(&np->nic_poll);
np->nic_poll.data = (unsigned long) dev;
np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
+ init_timer(&np->stats_poll);
+ np->stats_poll.data = (unsigned long) dev;
+ np->stats_poll.function = &nv_do_stats_poll; /* timer handler */
err = pci_enable_device(pci_dev);
if (err) {
@@ -3017,7 +4103,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
if (err < 0)
goto out_disable;
- if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL))
+ if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS))
np->register_size = NV_PCI_REGSZ_VER2;
else
np->register_size = NV_PCI_REGSZ_VER1;
@@ -3049,16 +4135,18 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
/* packet format 3: supports 40-bit addressing */
np->desc_ver = DESC_VER_3;
np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
- if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
- printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
- pci_name(pci_dev));
- } else {
- dev->features |= NETIF_F_HIGHDMA;
- printk(KERN_INFO "forcedeth: using HIGHDMA\n");
- }
- if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
- printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
- pci_name(pci_dev));
+ if (dma_64bit) {
+ if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
+ printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
+ pci_name(pci_dev));
+ } else {
+ dev->features |= NETIF_F_HIGHDMA;
+ printk(KERN_INFO "forcedeth: using HIGHDMA\n");
+ }
+ if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
+ printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n",
+ pci_name(pci_dev));
+ }
}
} else if (id->driver_data & DEV_HAS_LARGEDESC) {
/* packet format 2: supports jumbo frames */
@@ -3091,13 +4179,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
}
np->msi_flags = 0;
- if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) {
+ if ((id->driver_data & DEV_HAS_MSI) && msi) {
np->msi_flags |= NV_MSI_CAPABLE;
}
- if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) {
+ if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
np->msi_flags |= NV_MSI_X_CAPABLE;
}
+ np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
+ if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
+ np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
+ }
+
+
err = -ENOMEM;
np->base = ioremap(addr, np->register_size);
if (!np->base)
@@ -3106,21 +4200,38 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev->irq = pci_dev->irq;
+ np->rx_ring_size = RX_RING_DEFAULT;
+ np->tx_ring_size = TX_RING_DEFAULT;
+ np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
+ np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
+
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
np->rx_ring.orig = pci_alloc_consistent(pci_dev,
- sizeof(struct ring_desc) * (RX_RING + TX_RING),
+ sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
&np->ring_addr);
if (!np->rx_ring.orig)
goto out_unmap;
- np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
+ np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
} else {
np->rx_ring.ex = pci_alloc_consistent(pci_dev,
- sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
+ sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
&np->ring_addr);
if (!np->rx_ring.ex)
goto out_unmap;
- np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
+ np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
}
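+ /* allocate per-descriptor bookkeeping arrays sized to the configurable rings */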
+ np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL);
+ np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL);
+ np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL);
+ np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
+ np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
+ if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
+ goto out_freering;
+ memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
+ memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
+ memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
+ memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
+ memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
dev->open = nv_open;
dev->stop = nv_close;
@@ -3242,9 +4353,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
if (i == 33) {
printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
pci_name(pci_dev));
- goto out_freering;
+ goto out_error;
}
-
+
/* reset it */
phy_init(dev);
@@ -3256,7 +4367,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
err = register_netdev(dev);
if (err) {
printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
- goto out_freering;
+ goto out_error;
}
printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
@@ -3264,14 +4375,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
return 0;
-out_freering:
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
- np->rx_ring.orig, np->ring_addr);
- else
- pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
- np->rx_ring.ex, np->ring_addr);
+out_error:
pci_set_drvdata(pci_dev, NULL);
+out_freering:
+ free_rings(dev);
out_unmap:
iounmap(get_hwbase(dev));
out_relreg:
@@ -3287,15 +4394,11 @@ out:
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
struct net_device *dev = pci_get_drvdata(pci_dev);
- struct fe_priv *np = netdev_priv(dev);
unregister_netdev(dev);
/* free all structures */
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
- else
- pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
+ free_rings(dev);
iounmap(get_hwbase(dev));
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
@@ -3358,11 +4461,43 @@ static struct pci_device_id pci_tbl[] = {
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
},
{0,},
};
@@ -3392,10 +4527,12 @@ module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
-module_param(disable_msi, int, 0);
-MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1.");
-module_param(disable_msix, int, 0);
-MODULE_PARM_DESC(disable_msix, "Disable MSIX interrupts by setting to 1.");
+module_param(msi, int, 0);
+MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
+module_param(msix, int, 0);
+MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
+module_param(dma_64bit, int, 0);
+MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index 0d7a6250e346..e26a3e407d70 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -446,7 +446,7 @@ MODULE_LICENSE("GPL");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
-int
+int __init
init_module(void)
{
struct net_device *dev;
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index cf9fb3698a6b..551a71b3c5fd 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -384,7 +384,7 @@ hp_block_output(struct net_device *dev, int count,
}
/* This function resets the ethercard if something screws up. */
-static void
+static void __init
hp_init_card(struct net_device *dev)
{
int irq = dev->irq;
@@ -409,7 +409,7 @@ MODULE_LICENSE("GPL");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
-int
+int __init
init_module(void)
{
struct net_device *dev;
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 01ad904215a1..51fd51609ea9 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -1,4 +1,4 @@
-/*
+/*
net-3-driver for the IBM LAN Adapter/A
This is an extension to the Linux operating system, and is covered by the
@@ -11,9 +11,9 @@ This driver is based both on the SK_MCA driver, which is itself based on the
SK_G16 and 3C523 driver.
paper sources:
- 'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
+ 'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
Hans-Peter Messmer for the basic Microchannel stuff
-
+
'Linux Geraetetreiber' by Allesandro Rubini, Kalle Dalheimer
for help on Ethernet driver programming
@@ -27,14 +27,14 @@ paper sources:
special acknowledgements to:
- Bob Eager for helping me out with documentation from IBM
- - Jim Shorney for his endless patience with me while I was using
+ - Jim Shorney for his endless patience with me while I was using
him as a beta tester to trace down the address filter bug ;-)
Missing things:
-> set debug level via ioctl instead of compile-time switches
-> I didn't follow the development of the 2.1.x kernels, so my
- assumptions about which things changed with which kernel version
+ assumptions about which things changed with which kernel version
are probably nonsense
History:
@@ -275,7 +275,7 @@ static void InitDscrs(struct net_device *dev)
priv->rrastart = raddr = priv->txbufstart + (TXBUFCNT * PKTSIZE);
priv->rdastart = addr = priv->rrastart + (priv->rxbufcnt * sizeof(rra_t));
priv->rxbufstart = baddr = priv->rdastart + (priv->rxbufcnt * sizeof(rda_t));
-
+
for (z = 0; z < priv->rxbufcnt; z++) {
rra.startlo = baddr;
rra.starthi = 0;
@@ -570,7 +570,7 @@ static void irqrx_handler(struct net_device *dev)
lrdaaddr = priv->rdastart + (priv->lastrxdescr * sizeof(rda_t));
memcpy_fromio(&rda, priv->base + rdaaddr, sizeof(rda_t));
- /* iron out upper word halves of fields we use - SONIC will duplicate
+ /* iron out upper word halves of fields we use - SONIC will duplicate
bits 0..15 to 16..31 */
rda.status &= 0xffff;
@@ -836,9 +836,9 @@ static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev)
baddr = priv->txbufstart + (priv->nexttxdescr * PKTSIZE);
memcpy_toio(priv->base + baddr, skb->data, skb->len);
- /* copy filler into RAM - in case we're filling up...
+ /* copy filler into RAM - in case we're filling up...
we're filling a bit more than necessary, but that doesn't harm
- since the buffer is far larger...
+ since the buffer is far larger...
Sorry Linus for the filler string but I couldn't resist ;-) */
if (tmplen > skb->len) {
@@ -952,7 +952,7 @@ static int ibmlana_probe(struct net_device *dev)
priv->realirq = irq;
priv->medium = medium;
spin_lock_init(&priv->lock);
-
+
/* set base + irq for this device (irq not allocated so far) */
diff --git a/drivers/net/ibmlana.h b/drivers/net/ibmlana.h
index 458ee226e537..6b58bab9e308 100644
--- a/drivers/net/ibmlana.h
+++ b/drivers/net/ibmlana.h
@@ -17,7 +17,7 @@
/* media enumeration - defined in a way that it fits onto the LAN/A's
POS registers... */
-typedef enum {
+typedef enum {
Media_10BaseT, Media_10Base5,
Media_Unknown, Media_10Base2, Media_Count
} ibmlana_medium;
@@ -27,7 +27,7 @@ typedef enum {
typedef struct {
unsigned int slot; /* MCA-Slot-# */
struct net_device_stats stat; /* packet statistics */
- int realirq; /* memorizes actual IRQ, even when
+ int realirq; /* memorizes actual IRQ, even when
currently not allocated */
ibmlana_medium medium; /* physical connector */
u32 tdastart, txbufstart, /* addresses */
@@ -41,7 +41,7 @@ typedef struct {
spinlock_t lock;
} ibmlana_priv;
-/* this card uses quite a lot of I/O ports...luckily the MCA bus decodes
+/* this card uses quite a lot of I/O ports...luckily the MCA bus decodes
a full 64K I/O range... */
#define IBM_LANA_IORANGE 0xa0
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 52d01027d9e7..666346f6469e 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -24,7 +24,7 @@
/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor. */
-/* */
+/* */
/**************************************************************************/
/*
TODO:
@@ -79,7 +79,7 @@
#else
#define ibmveth_debug_printk_no_adapter(fmt, args...)
#define ibmveth_debug_printk(fmt, args...)
-#define ibmveth_assert(expr)
+#define ibmveth_assert(expr)
#endif
static int ibmveth_open(struct net_device *dev);
@@ -96,6 +96,7 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static struct kobj_type ktype_veth_pool;
#ifdef CONFIG_PROC_FS
#define IBMVETH_PROC_DIR "net/ibmveth"
@@ -133,12 +134,13 @@ static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
}
/* setup the initial settings for a buffer pool */
-static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size)
+static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
{
pool->size = pool_size;
pool->index = pool_index;
pool->buff_size = buff_size;
pool->threshold = pool_size / 2;
+ pool->active = pool_active;
}
/* allocate and set up a buffer pool - called during open */
@@ -146,13 +148,13 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
int i;
- pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
+ pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
if(!pool->free_map) {
return -1;
}
- pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
+ pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
if(!pool->dma_addr) {
kfree(pool->free_map);
pool->free_map = NULL;
@@ -180,7 +182,6 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
atomic_set(&pool->available, 0);
pool->producer_index = 0;
pool->consumer_index = 0;
- pool->active = 0;
return 0;
}
@@ -214,7 +215,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
free_index = pool->consumer_index++ % pool->size;
index = pool->free_map[free_index];
-
+
ibmveth_assert(index != IBM_VETH_INVALID_MAP);
ibmveth_assert(pool->skbuff[index] == NULL);
@@ -231,10 +232,10 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
desc.desc = 0;
desc.fields.valid = 1;
desc.fields.length = pool->buff_size;
- desc.fields.address = dma_addr;
+ desc.fields.address = dma_addr;
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
-
+
if(lpar_rc != H_SUCCESS) {
pool->free_map[free_index] = index;
pool->skbuff[index] = NULL;
@@ -250,13 +251,13 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
adapter->replenish_add_buff_success++;
}
}
-
+
mb();
atomic_add(buffers_added, &(pool->available));
}
/* replenish routine */
-static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
+static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
int i;
@@ -264,7 +265,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
for(i = 0; i < IbmVethNumBufferPools; i++)
if(adapter->rx_buff_pool[i].active)
- ibmveth_replenish_buffer_pool(adapter,
+ ibmveth_replenish_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
@@ -301,7 +302,6 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
kfree(pool->skbuff);
pool->skbuff = NULL;
}
- pool->active = 0;
}
/* remove a buffer from a pool */
@@ -372,7 +372,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
-
+
if(lpar_rc != H_SUCCESS) {
ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
@@ -407,7 +407,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
}
free_page((unsigned long)adapter->buffer_list_addr);
adapter->buffer_list_addr = NULL;
- }
+ }
if(adapter->filter_list_addr != NULL) {
if(!dma_mapping_error(adapter->filter_list_dma)) {
@@ -433,7 +433,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
}
for(i = 0; i<IbmVethNumBufferPools; i++)
- ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
+ if (adapter->rx_buff_pool[i].active)
+ ibmveth_free_buffer_pool(adapter,
+ &adapter->rx_buff_pool[i]);
}
static int ibmveth_open(struct net_device *netdev)
@@ -450,10 +452,10 @@ static int ibmveth_open(struct net_device *netdev)
for(i = 0; i<IbmVethNumBufferPools; i++)
rxq_entries += adapter->rx_buff_pool[i].size;
-
+
adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
-
+
if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
ibmveth_cleanup(adapter);
@@ -489,9 +491,6 @@ static int ibmveth_open(struct net_device *netdev)
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
- /* call change_mtu to init the buffer pools based in initial mtu */
- ibmveth_change_mtu(netdev, netdev->mtu);
-
memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
mac_address = mac_address >> 16;
@@ -504,7 +503,7 @@ static int ibmveth_open(struct net_device *netdev)
ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
-
+
lpar_rc = h_register_logical_lan(adapter->vdev->unit_address,
adapter->buffer_list_dma,
rxq_desc.desc,
@@ -519,7 +518,18 @@ static int ibmveth_open(struct net_device *netdev)
rxq_desc.desc,
mac_address);
ibmveth_cleanup(adapter);
- return -ENONET;
+ return -ENONET;
+ }
+
+ for(i = 0; i<IbmVethNumBufferPools; i++) {
+ if(!adapter->rx_buff_pool[i].active)
+ continue;
+ if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
+ ibmveth_error_printk("unable to alloc pool\n");
+ adapter->rx_buff_pool[i].active = 0;
+ ibmveth_cleanup(adapter);
+ return -ENOMEM ;
+ }
}
ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
@@ -547,10 +557,11 @@ static int ibmveth_close(struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev->priv;
long lpar_rc;
-
+
ibmveth_debug_printk("close starting\n");
- netif_stop_queue(netdev);
+ if (!adapter->pool_config)
+ netif_stop_queue(netdev);
free_irq(netdev->irq, netdev);
@@ -694,7 +705,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
desc[5].desc,
correlator);
} while ((lpar_rc == H_BUSY) && (retry_count--));
-
+
if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
int i;
ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
@@ -780,7 +791,7 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
/* more work to do - return that we are not done yet */
netdev->quota -= frames_processed;
*budget -= frames_processed;
- return 1;
+ return 1;
}
/* we think we are done - reenable interrupts, then check once more to make sure we are done */
@@ -806,7 +817,7 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
}
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
-{
+{
struct net_device *netdev = dev_instance;
struct ibmveth_adapter *adapter = netdev->priv;
unsigned long lpar_rc;
@@ -862,7 +873,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
}
}
-
+
/* re-enable filtering */
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableFiltering,
@@ -876,46 +887,22 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
struct ibmveth_adapter *adapter = dev->priv;
+ int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
int i;
- int prev_smaller = 1;
- if ((new_mtu < 68) ||
- (new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH))
+ if (new_mtu < IBMVETH_MAX_MTU)
return -EINVAL;
+ /* Look for an active buffer pool that can hold the new MTU */
for(i = 0; i<IbmVethNumBufferPools; i++) {
- int activate = 0;
- if (new_mtu > (pool_size[i] - IBMVETH_BUFF_OH)) {
- activate = 1;
- prev_smaller= 1;
- } else {
- if (prev_smaller)
- activate = 1;
- prev_smaller= 0;
+ if (!adapter->rx_buff_pool[i].active)
+ continue;
+ if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
+ dev->mtu = new_mtu;
+ return 0;
}
-
- if (activate && !adapter->rx_buff_pool[i].active) {
- struct ibmveth_buff_pool *pool =
- &adapter->rx_buff_pool[i];
- if(ibmveth_alloc_buffer_pool(pool)) {
- ibmveth_error_printk("unable to alloc pool\n");
- return -ENOMEM;
- }
- adapter->rx_buff_pool[i].active = 1;
- } else if (!activate && adapter->rx_buff_pool[i].active) {
- adapter->rx_buff_pool[i].active = 0;
- h_free_logical_lan_buffer(adapter->vdev->unit_address,
- (u64)pool_size[i]);
- }
-
}
-
- /* kick the interrupt handler so that the new buffer pools get
- replenished or deallocated */
- ibmveth_interrupt(dev->irq, dev, NULL);
-
- dev->mtu = new_mtu;
- return 0;
+ return -EINVAL;
}
static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
@@ -928,7 +915,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
unsigned int *mcastFilterSize_p;
- ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
+ ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
dev->unit_address);
mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0);
@@ -937,7 +924,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
"attribute\n", __FILE__, __LINE__);
return 0;
}
-
+
mcastFilterSize_p= (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0);
if(!mcastFilterSize_p) {
printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
@@ -945,7 +932,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
__FILE__, __LINE__);
return 0;
}
-
+
netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
if(!netdev)
@@ -960,13 +947,14 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
adapter->vdev = dev;
adapter->netdev = netdev;
adapter->mcastFilterSize= *mcastFilterSize_p;
-
+ adapter->pool_config = 0;
+
/* Some older boxes running PHYP non-natively have an OF that
- returns a 8-byte local-mac-address field (and the first
+ returns a 8-byte local-mac-address field (and the first
2 bytes have to be ignored) while newer boxes' OF return
- a 6-byte field. Note that IEEE 1275 specifies that
+ a 6-byte field. Note that IEEE 1275 specifies that
local-mac-address must be a 6-byte field.
- The RPA doc specifies that the first byte must be 10b, so
+ The RPA doc specifies that the first byte must be 10b, so
we'll just look for it to solve this 8 vs. 6 byte field issue */
if ((*mac_addr_p & 0x3) != 0x02)
@@ -976,7 +964,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
memcpy(&adapter->mac_addr, mac_addr_p, 6);
adapter->liobn = dev->iommu_table->it_index;
-
+
netdev->irq = dev->irq;
netdev->open = ibmveth_open;
netdev->poll = ibmveth_poll;
@@ -989,14 +977,21 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
netdev->ethtool_ops = &netdev_ethtool_ops;
netdev->change_mtu = ibmveth_change_mtu;
SET_NETDEV_DEV(netdev, &dev->dev);
- netdev->features |= NETIF_F_LLTX;
+ netdev->features |= NETIF_F_LLTX;
spin_lock_init(&adapter->stats_lock);
memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
- for(i = 0; i<IbmVethNumBufferPools; i++)
- ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
- pool_count[i], pool_size[i]);
+ for(i = 0; i<IbmVethNumBufferPools; i++) {
+ struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+ pool_count[i], pool_size[i],
+ pool_active[i]);
+ kobj->parent = &dev->dev.kobj;
+ sprintf(kobj->name, "pool%d", i);
+ kobj->ktype = &ktype_veth_pool;
+ kobject_register(kobj);
+ }
ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
@@ -1025,6 +1020,10 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
{
struct net_device *netdev = dev->dev.driver_data;
struct ibmveth_adapter *adapter = netdev->priv;
+ int i;
+
+ for(i = 0; i<IbmVethNumBufferPools; i++)
+ kobject_unregister(&adapter->rx_buff_pool[i].kobj);
unregister_netdev(netdev);
@@ -1048,7 +1047,7 @@ static void ibmveth_proc_unregister_driver(void)
remove_proc_entry(IBMVETH_PROC_DIR, NULL);
}
-static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
+static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
{
if (*pos == 0) {
return (void *)1;
@@ -1063,18 +1062,18 @@ static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
return NULL;
}
-static void ibmveth_seq_stop(struct seq_file *seq, void *v)
+static void ibmveth_seq_stop(struct seq_file *seq, void *v)
{
}
-static int ibmveth_seq_show(struct seq_file *seq, void *v)
+static int ibmveth_seq_show(struct seq_file *seq, void *v)
{
struct ibmveth_adapter *adapter = seq->private;
char *current_mac = ((char*) &adapter->netdev->dev_addr);
char *firmware_mac = ((char*) &adapter->mac_addr) ;
seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
-
+
seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address);
seq_printf(seq, "LIOBN: 0x%lx\n", adapter->liobn);
seq_printf(seq, "Current MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
@@ -1083,7 +1082,7 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "Firmware MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
firmware_mac[0], firmware_mac[1], firmware_mac[2],
firmware_mac[3], firmware_mac[4], firmware_mac[5]);
-
+
seq_printf(seq, "\nAdapter Statistics:\n");
seq_printf(seq, " TX: skbuffs linearized: %ld\n", adapter->tx_linearized);
seq_printf(seq, " multi-descriptor sends: %ld\n", adapter->tx_multidesc_send);
@@ -1095,7 +1094,7 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, " add buffer failures: %ld\n", adapter->replenish_add_buff_failure);
seq_printf(seq, " invalid buffers: %ld\n", adapter->rx_invalid_buffer);
seq_printf(seq, " no buffers: %ld\n", adapter->rx_no_buffer);
-
+
return 0;
}
static struct seq_operations ibmveth_seq_ops = {
@@ -1153,11 +1152,11 @@ static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
}
#else /* CONFIG_PROC_FS */
-static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
+static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
}
-static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
+static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
}
static void ibmveth_proc_register_driver(void)
@@ -1169,6 +1168,132 @@ static void ibmveth_proc_unregister_driver(void)
}
#endif /* CONFIG_PROC_FS */
+static struct attribute veth_active_attr;
+static struct attribute veth_num_attr;
+static struct attribute veth_size_attr;
+
+static ssize_t veth_pool_show(struct kobject * kobj,
+ struct attribute * attr, char * buf)
+{
+ struct ibmveth_buff_pool *pool = container_of(kobj,
+ struct ibmveth_buff_pool,
+ kobj);
+
+ if (attr == &veth_active_attr)
+ return sprintf(buf, "%d\n", pool->active);
+ else if (attr == &veth_num_attr)
+ return sprintf(buf, "%d\n", pool->size);
+ else if (attr == &veth_size_attr)
+ return sprintf(buf, "%d\n", pool->buff_size);
+ return 0;
+}
+
+static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr,
+const char * buf, size_t count)
+{
+ struct ibmveth_buff_pool *pool = container_of(kobj,
+ struct ibmveth_buff_pool,
+ kobj);
+ struct net_device *netdev =
+ container_of(kobj->parent, struct device, kobj)->driver_data;
+ struct ibmveth_adapter *adapter = netdev->priv;
+ long value = simple_strtol(buf, NULL, 10);
+ long rc;
+
+ if (attr == &veth_active_attr) {
+ if (value && !pool->active) {
+ if(ibmveth_alloc_buffer_pool(pool)) {
+ ibmveth_error_printk("unable to alloc pool\n");
+ return -ENOMEM;
+ }
+ pool->active = 1;
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ adapter->pool_config = 0;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ } else if (!value && pool->active) {
+ int mtu = netdev->mtu + IBMVETH_BUFF_OH;
+ int i;
+ /* Make sure there is a buffer pool with buffers that
+ can hold a packet of the size of the MTU */
+ for(i = 0; i<IbmVethNumBufferPools; i++) {
+ if (pool == &adapter->rx_buff_pool[i])
+ continue;
+ if (!adapter->rx_buff_pool[i].active)
+ continue;
+ if (mtu < adapter->rx_buff_pool[i].buff_size) {
+ pool->active = 0;
+ h_free_logical_lan_buffer(adapter->
+ vdev->
+ unit_address,
+ pool->
+ buff_size);
+ }
+ }
+ if (pool->active) {
+ ibmveth_error_printk("no active pool >= MTU\n");
+ return -EPERM;
+ }
+ }
+ } else if (attr == &veth_num_attr) {
+ if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
+ return -EINVAL;
+ else {
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ adapter->pool_config = 0;
+ pool->size = value;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ }
+ } else if (attr == &veth_size_attr) {
+ if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
+ return -EINVAL;
+ else {
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ adapter->pool_config = 0;
+ pool->buff_size = value;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ }
+ }
+
+ /* kick the interrupt handler to allocate/deallocate pools */
+ ibmveth_interrupt(netdev->irq, netdev, NULL);
+ return count;
+}
+
+
+#define ATTR(_name, _mode) \
+ struct attribute veth_##_name##_attr = { \
+ .name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE \
+ };
+
+static ATTR(active, 0644);
+static ATTR(num, 0644);
+static ATTR(size, 0644);
+
+static struct attribute * veth_pool_attrs[] = {
+ &veth_active_attr,
+ &veth_num_attr,
+ &veth_size_attr,
+ NULL,
+};
+
+static struct sysfs_ops veth_pool_ops = {
+ .show = veth_pool_show,
+ .store = veth_pool_store,
+};
+
+static struct kobj_type ktype_veth_pool = {
+ .release = NULL,
+ .sysfs_ops = &veth_pool_ops,
+ .default_attrs = veth_pool_attrs,
+};
+
+
static struct vio_device_id ibmveth_device_table[] __devinitdata= {
{ "network", "IBM,l-lan"},
{ "", "" }
@@ -1198,7 +1323,7 @@ static void __exit ibmveth_module_exit(void)
{
vio_unregister_driver(&ibmveth_driver);
ibmveth_proc_unregister_driver();
-}
+}
module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);
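Each rx buffer pool now gets its own kobject in ibmveth_probe(), so the active, num and size attributes defined by the ATTR() macros above appear as writable files under per-pool sysfs directories named pool0..pool4. A minimal userspace sketch of activating a pool is shown below; the device path is an assumption for illustration, since the real location follows the vio device's position in the device tree.

    /* Illustration only: turn on rx buffer pool 2 of an ibmveth device via
     * the new sysfs attribute.  "30000002" is a made-up vio unit address. */
    #include <stdio.h>

    int main(void)
    {
            const char *attr = "/sys/devices/vio/30000002/pool2/active";
            FILE *f = fopen(attr, "w");

            if (!f) {
                    perror(attr);
                    return 1;
            }
            fputs("1\n", f);
            return fclose(f) ? 1 : 0;
    }

Writes to num and size behave the same way: veth_pool_store() sets pool_config, closes the interface, updates the pool field and reopens it, so the link bounces briefly whenever a pool is reconfigured.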
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 46919a814fca..8385bf836507 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -75,10 +75,13 @@
#define IbmVethNumBufferPools 5
#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
+#define IBMVETH_MAX_MTU 68
+#define IBMVETH_MAX_POOL_COUNT 4096
+#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
-/* pool_size should be sorted */
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 768, 256, 256, 256 };
+static int pool_active[] = { 1, 1, 0, 0, 0};
#define IBM_VETH_INVALID_MAP ((u16)0xffff)
@@ -94,6 +97,7 @@ struct ibmveth_buff_pool {
dma_addr_t *dma_addr;
struct sk_buff **skbuff;
int active;
+ struct kobject kobj;
};
struct ibmveth_rx_q {
@@ -118,6 +122,7 @@ struct ibmveth_adapter {
dma_addr_t filter_list_dma;
struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools];
struct ibmveth_rx_q rx_queue;
+ int pool_config;
/* adapter specific stats */
u64 replenish_task_cycles;
@@ -134,7 +139,7 @@ struct ibmveth_adapter {
spinlock_t stats_lock;
};
-struct ibmveth_buf_desc_fields {
+struct ibmveth_buf_desc_fields {
u32 valid : 1;
u32 toggle : 1;
u32 reserved : 6;
@@ -143,7 +148,7 @@ struct ibmveth_buf_desc_fields {
};
union ibmveth_buf_desc {
- u64 desc;
+ u64 desc;
struct ibmveth_buf_desc_fields fields;
};
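Combined with the reworked ibmveth_change_mtu() above, the defaults in this header mean only the 512- and 2048-byte pools are active after probe, and a new MTU is accepted only when some active pool's buff_size is strictly greater than new_mtu + IBMVETH_BUFF_OH (22 bytes); IBMVETH_MAX_MTU (68) serves as the lower bound in that check. A small sketch of the acceptance test, for illustration only:

    /* Mirrors the new ibmveth_change_mtu() acceptance test using the default
     * pool_size/pool_active values above.  With the 22-byte overhead, the
     * largest MTU the defaults accept is 2025 (2025 + 22 = 2047 < 2048),
     * while 2026 is rejected (2026 + 22 = 2048, not strictly smaller). */
    static int mtu_fits_default_pools(int new_mtu)
    {
            static const int active_buff_size[] = { 512, 2048 };
            int i;

            if (new_mtu < 68)       /* IBMVETH_MAX_MTU, used as the minimum */
                    return 0;
            for (i = 0; i < 2; i++)
                    if (new_mtu + 22 < active_buff_size[i])
                            return 1;
            return 0;
    }

Larger MTUs first require activating one of the 16K/32K/64K pools, for example through the per-pool sysfs attributes added in ibmveth.c.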
diff --git a/drivers/net/ixgb/Makefile b/drivers/net/ixgb/Makefile
index 7c7aff1ea7d5..a8a2d3d03567 100644
--- a/drivers/net/ixgb/Makefile
+++ b/drivers/net/ixgb/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
#
-# Copyright(c) 1999 - 2002 Intel Corporation. All rights reserved.
+# Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index c83271b38621..a83ef28dadb0 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -84,7 +84,12 @@ struct ixgb_adapter;
#define IXGB_DBG(args...)
#endif
-#define IXGB_ERR(args...) printk(KERN_ERR "ixgb: " args)
+#define PFX "ixgb: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+ (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+ __FUNCTION__ , ## args))
+
/* TX/RX descriptor defines */
#define DEFAULT_TXD 256
@@ -175,6 +180,7 @@ struct ixgb_adapter {
uint64_t hw_csum_tx_good;
uint64_t hw_csum_tx_error;
uint32_t tx_int_delay;
+ uint32_t tx_timeout_count;
boolean_t tx_int_delay_enable;
boolean_t detect_tx_hung;
@@ -192,7 +198,9 @@ struct ixgb_adapter {
/* structs defined in ixgb_hw.h */
struct ixgb_hw hw;
+ u16 msg_enable;
struct ixgb_hw_stats stats;
+ uint32_t alloc_rx_buff_failed;
#ifdef CONFIG_PCI_MSI
boolean_t have_msi;
#endif
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 661a46b95a61..8357c5590bfb 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h
index 5190aa8761a2..bf6fa220f38e 100644
--- a/drivers/net/ixgb/ixgb_ee.h
+++ b/drivers/net/ixgb/ixgb_ee.h
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index d38ade5f2f4e..cf19b898ba9b 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -44,6 +44,8 @@ extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
extern void ixgb_update_stats(struct ixgb_adapter *adapter);
+#define IXGB_ALL_RAR_ENTRIES 16
+
struct ixgb_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -76,6 +78,7 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
{"tx_heartbeat_errors", IXGB_STAT(net_stats.tx_heartbeat_errors)},
{"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)},
{"tx_deferred_ok", IXGB_STAT(stats.dc)},
+ {"tx_timeout_count", IXGB_STAT(tx_timeout_count) },
{"rx_long_length_errors", IXGB_STAT(stats.roc)},
{"rx_short_length_errors", IXGB_STAT(stats.ruc)},
#ifdef NETIF_F_TSO
@@ -117,6 +120,16 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
return 0;
}
+static void ixgb_set_speed_duplex(struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ /* be optimistic about our link, since we were up before */
+ adapter->link_speed = 10000;
+ adapter->link_duplex = FULL_DUPLEX;
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+}
+
static int
ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
@@ -130,12 +143,7 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ixgb_down(adapter, TRUE);
ixgb_reset(adapter);
ixgb_up(adapter);
- /* be optimistic about our link, since we were up before */
- adapter->link_speed = 10000;
- adapter->link_duplex = FULL_DUPLEX;
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
-
+ ixgb_set_speed_duplex(netdev);
} else
ixgb_reset(adapter);
@@ -183,11 +191,7 @@ ixgb_set_pauseparam(struct net_device *netdev,
if(netif_running(adapter->netdev)) {
ixgb_down(adapter, TRUE);
ixgb_up(adapter);
- /* be optimistic about our link, since we were up before */
- adapter->link_speed = 10000;
- adapter->link_duplex = FULL_DUPLEX;
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
+ ixgb_set_speed_duplex(netdev);
} else
ixgb_reset(adapter);
@@ -212,11 +216,7 @@ ixgb_set_rx_csum(struct net_device *netdev, uint32_t data)
if(netif_running(netdev)) {
ixgb_down(adapter,TRUE);
ixgb_up(adapter);
- /* be optimistic about our link, since we were up before */
- adapter->link_speed = 10000;
- adapter->link_duplex = FULL_DUPLEX;
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
+ ixgb_set_speed_duplex(netdev);
} else
ixgb_reset(adapter);
return 0;
@@ -251,6 +251,19 @@ ixgb_set_tso(struct net_device *netdev, uint32_t data)
}
#endif /* NETIF_F_TSO */
+static uint32_t
+ixgb_get_msglevel(struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void
+ixgb_set_msglevel(struct net_device *netdev, uint32_t data)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
#define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_
static int
@@ -303,7 +316,7 @@ ixgb_get_regs(struct net_device *netdev,
*reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
/* there are 16 RAR entries in hardware, we only use 3 */
- for(i = 0; i < 16; i++) {
+ for(i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) {
*reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
*reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
}
@@ -593,11 +606,7 @@ ixgb_set_ringparam(struct net_device *netdev,
adapter->tx_ring = tx_new;
if((err = ixgb_up(adapter)))
return err;
- /* be optimistic about our link, since we were up before */
- adapter->link_speed = 10000;
- adapter->link_duplex = FULL_DUPLEX;
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
+ ixgb_set_speed_duplex(netdev);
}
return 0;
@@ -714,6 +723,8 @@ static struct ethtool_ops ixgb_ethtool_ops = {
.set_tx_csum = ixgb_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
+ .get_msglevel = ixgb_get_msglevel,
+ .set_msglevel = ixgb_set_msglevel,
#ifdef NETIF_F_TSO
.get_tso = ethtool_op_get_tso,
.set_tso = ixgb_set_tso,
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 620cad48bdea..f7fa10e47fa2 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index 382c6300ccc2..cb4568915ada 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -57,6 +57,7 @@ typedef enum {
typedef enum {
ixgb_media_type_unknown = 0,
ixgb_media_type_fiber = 1,
+ ixgb_media_type_copper = 2,
ixgb_num_media_types
} ixgb_media_type;
diff --git a/drivers/net/ixgb/ixgb_ids.h b/drivers/net/ixgb/ixgb_ids.h
index aee207eaa287..40a085f94c7b 100644
--- a/drivers/net/ixgb/ixgb_ids.h
+++ b/drivers/net/ixgb/ixgb_ids.h
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -43,6 +43,8 @@
#define IXGB_SUBDEVICE_ID_A11F 0xA11F
#define IXGB_SUBDEVICE_ID_A01F 0xA01F
-#endif /* #ifndef _IXGB_IDS_H_ */
+#define IXGB_DEVICE_ID_82597EX_CX4 0x109E
+#define IXGB_SUBDEVICE_ID_A00C 0xA00C
+#endif /* #ifndef _IXGB_IDS_H_ */
/* End of File */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index cfd67d812f0d..57006fb8840e 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -28,22 +28,6 @@
#include "ixgb.h"
-/* Change Log
- * 1.0.96 04/19/05
- * - Make needlessly global code static -- bunk@stusta.de
- * - ethtool cleanup -- shemminger@osdl.org
- * - Support for MODULE_VERSION -- linville@tuxdriver.com
- * - add skb_header_cloned check to the tso path -- herbert@apana.org.au
- * 1.0.88 01/05/05
- * - include fix to the condition that determines when to quit NAPI - Robert Olsson
- * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
- * 1.0.84 10/26/04
- * - reset buffer_info->dma in Tx resource cleanup logic
- * 1.0.83 10/12/04
- * - sparse cleanup - shemminger@osdl.org
- * - fix tx resource cleanup logic
- */
-
char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
@@ -52,9 +36,9 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "1.0.100-k2"DRIVERNAPI
+#define DRV_VERSION "1.0.109-k2"DRIVERNAPI
char ixgb_driver_version[] = DRV_VERSION;
-static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
+static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
/* ixgb_pci_tbl - PCI Device ID Table
*
@@ -67,6 +51,8 @@ static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
static struct pci_device_id ixgb_pci_tbl[] = {
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
@@ -148,6 +134,11 @@ MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
/* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */
#define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefetch below
@@ -196,7 +187,7 @@ module_exit(ixgb_exit_module);
* @adapter: board private structure
**/
-static inline void
+static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
atomic_inc(&adapter->irq_sem);
@@ -210,7 +201,7 @@ ixgb_irq_disable(struct ixgb_adapter *adapter)
* @adapter: board private structure
**/
-static inline void
+static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
if(atomic_dec_and_test(&adapter->irq_sem)) {
@@ -231,6 +222,7 @@ ixgb_up(struct ixgb_adapter *adapter)
/* hardware has been reset, we need to reload some things */
+ ixgb_rar_set(hw, netdev->dev_addr, 0);
ixgb_set_multi(netdev);
ixgb_restore_vlan(adapter);
@@ -240,6 +232,9 @@ ixgb_up(struct ixgb_adapter *adapter)
ixgb_configure_rx(adapter);
ixgb_alloc_rx_buffers(adapter);
+ /* disable interrupts and get the hardware into a known state */
+ IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
+
#ifdef CONFIG_PCI_MSI
{
boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) &
@@ -249,7 +244,7 @@ ixgb_up(struct ixgb_adapter *adapter)
if (!pcix)
adapter->have_msi = FALSE;
else if((err = pci_enable_msi(adapter->pdev))) {
- printk (KERN_ERR
+ DPRINTK(PROBE, ERR,
"Unable to allocate MSI interrupt Error: %d\n", err);
adapter->have_msi = FALSE;
/* proceed to try to request regular interrupt */
@@ -259,11 +254,11 @@ ixgb_up(struct ixgb_adapter *adapter)
#endif
if((err = request_irq(adapter->pdev->irq, &ixgb_intr,
SA_SHIRQ | SA_SAMPLE_RANDOM,
- netdev->name, netdev)))
+ netdev->name, netdev))) {
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate interrupt Error: %d\n", err);
return err;
-
- /* disable interrupts and get the hardware into a known state */
- IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
+ }
if((hw->max_frame_size != max_frame) ||
(hw->max_frame_size !=
@@ -285,11 +280,12 @@ ixgb_up(struct ixgb_adapter *adapter)
}
mod_timer(&adapter->watchdog_timer, jiffies);
- ixgb_irq_enable(adapter);
#ifdef CONFIG_IXGB_NAPI
netif_poll_enable(netdev);
#endif
+ ixgb_irq_enable(adapter);
+
return 0;
}
@@ -326,7 +322,7 @@ ixgb_reset(struct ixgb_adapter *adapter)
ixgb_adapter_stop(&adapter->hw);
if(!ixgb_init_hw(&adapter->hw))
- IXGB_DBG("ixgb_init_hw failed.\n");
+ DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n");
}
/**
@@ -363,7 +359,8 @@ ixgb_probe(struct pci_dev *pdev,
} else {
if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
(err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
- IXGB_ERR("No usable DMA configuration, aborting\n");
+ printk(KERN_ERR
+ "ixgb: No usable DMA configuration, aborting\n");
goto err_dma_mask;
}
pci_using_dac = 0;
@@ -388,6 +385,7 @@ ixgb_probe(struct pci_dev *pdev,
adapter->netdev = netdev;
adapter->pdev = pdev;
adapter->hw.back = adapter;
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);
mmio_start = pci_resource_start(pdev, BAR_0);
mmio_len = pci_resource_len(pdev, BAR_0);
@@ -416,7 +414,7 @@ ixgb_probe(struct pci_dev *pdev,
netdev->change_mtu = &ixgb_change_mtu;
ixgb_set_ethtool_ops(netdev);
netdev->tx_timeout = &ixgb_tx_timeout;
- netdev->watchdog_timeo = HZ;
+ netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_IXGB_NAPI
netdev->poll = &ixgb_clean;
netdev->weight = 64;
@@ -428,6 +426,7 @@ ixgb_probe(struct pci_dev *pdev,
netdev->poll_controller = ixgb_netpoll;
#endif
+ strcpy(netdev->name, pci_name(pdev));
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len;
netdev->base_addr = adapter->hw.io_base;
@@ -449,6 +448,9 @@ ixgb_probe(struct pci_dev *pdev,
#ifdef NETIF_F_TSO
netdev->features |= NETIF_F_TSO;
#endif
+#ifdef NETIF_F_LLTX
+ netdev->features |= NETIF_F_LLTX;
+#endif
if(pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
@@ -456,7 +458,7 @@ ixgb_probe(struct pci_dev *pdev,
/* make sure the EEPROM is good */
if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
- printk(KERN_ERR "The EEPROM Checksum Is Not Valid\n");
+ DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
@@ -465,6 +467,7 @@ ixgb_probe(struct pci_dev *pdev,
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
if(!is_valid_ether_addr(netdev->perm_addr)) {
+ DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
}
@@ -478,6 +481,7 @@ ixgb_probe(struct pci_dev *pdev,
INIT_WORK(&adapter->tx_timeout_task,
(void (*)(void *))ixgb_tx_timeout_task, netdev);
+ strcpy(netdev->name, "eth%d");
if((err = register_netdev(netdev)))
goto err_register;
@@ -486,8 +490,7 @@ ixgb_probe(struct pci_dev *pdev,
netif_carrier_off(netdev);
netif_stop_queue(netdev);
- printk(KERN_INFO "%s: Intel(R) PRO/10GbE Network Connection\n",
- netdev->name);
+ DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
ixgb_check_options(adapter);
/* reset the hardware with the new settings */
@@ -557,17 +560,17 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_id = pdev->subsystem_device;
- adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
-
hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
+ adapter->rx_buffer_len = hw->max_frame_size;
if((hw->device_id == IXGB_DEVICE_ID_82597EX)
- ||(hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
- ||(hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
+ || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
+ || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
+ || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
hw->mac_type = ixgb_82597;
else {
/* should never have loaded on this device */
- printk(KERN_ERR "ixgb: unsupported device id\n");
+ DPRINTK(PROBE, ERR, "unsupported device id\n");
}
/* enable flow control to be programmed */
@@ -665,6 +668,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * txdr->count;
txdr->buffer_info = vmalloc(size);
if(!txdr->buffer_info) {
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate transmit descriptor ring memory\n");
return -ENOMEM;
}
memset(txdr->buffer_info, 0, size);
@@ -677,6 +682,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
if(!txdr->desc) {
vfree(txdr->buffer_info);
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate transmit descriptor memory\n");
return -ENOMEM;
}
memset(txdr->desc, 0, txdr->size);
@@ -750,6 +757,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * rxdr->count;
rxdr->buffer_info = vmalloc(size);
if(!rxdr->buffer_info) {
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate receive descriptor ring\n");
return -ENOMEM;
}
memset(rxdr->buffer_info, 0, size);
@@ -763,6 +772,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
if(!rxdr->desc) {
vfree(rxdr->buffer_info);
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate receive descriptors\n");
return -ENOMEM;
}
memset(rxdr->desc, 0, rxdr->size);
@@ -794,21 +805,14 @@ ixgb_setup_rctl(struct ixgb_adapter *adapter)
rctl |= IXGB_RCTL_SECRC;
- switch (adapter->rx_buffer_len) {
- case IXGB_RXBUFFER_2048:
- default:
+ if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
rctl |= IXGB_RCTL_BSIZE_2048;
- break;
- case IXGB_RXBUFFER_4096:
+ else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
rctl |= IXGB_RCTL_BSIZE_4096;
- break;
- case IXGB_RXBUFFER_8192:
+ else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
rctl |= IXGB_RCTL_BSIZE_8192;
- break;
- case IXGB_RXBUFFER_16384:
+ else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
rctl |= IXGB_RCTL_BSIZE_16384;
- break;
- }
IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}
@@ -898,22 +902,25 @@ ixgb_free_tx_resources(struct ixgb_adapter *adapter)
adapter->tx_ring.desc = NULL;
}
-static inline void
+static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
struct ixgb_buffer *buffer_info)
{
struct pci_dev *pdev = adapter->pdev;
- if(buffer_info->dma) {
- pci_unmap_page(pdev,
- buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
- buffer_info->dma = 0;
- }
- if(buffer_info->skb) {
+
+ if (buffer_info->dma)
+ pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
+ PCI_DMA_TODEVICE);
+
+ if (buffer_info->skb)
dev_kfree_skb_any(buffer_info->skb);
- buffer_info->skb = NULL;
- }
+
+ buffer_info->skb = NULL;
+ buffer_info->dma = 0;
+ buffer_info->time_stamp = 0;
+ /* these fields must always be initialized in tx
+ * buffer_info->length = 0;
+ * buffer_info->next_to_watch = 0; */
}
/**
@@ -1112,8 +1119,8 @@ ixgb_watchdog(unsigned long data)
if(adapter->hw.link_up) {
if(!netif_carrier_ok(netdev)) {
- printk(KERN_INFO "ixgb: %s NIC Link is Up %d Mbps %s\n",
- netdev->name, 10000, "Full Duplex");
+ DPRINTK(LINK, INFO,
+ "NIC Link is Up 10000 Mbps Full Duplex\n");
adapter->link_speed = 10000;
adapter->link_duplex = FULL_DUPLEX;
netif_carrier_on(netdev);
@@ -1123,9 +1130,7 @@ ixgb_watchdog(unsigned long data)
if(netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
- printk(KERN_INFO
- "ixgb: %s NIC Link is Down\n",
- netdev->name);
+ DPRINTK(LINK, INFO, "NIC Link is Down\n");
netif_carrier_off(netdev);
netif_stop_queue(netdev);
@@ -1158,7 +1163,7 @@ ixgb_watchdog(unsigned long data)
#define IXGB_TX_FLAGS_VLAN 0x00000002
#define IXGB_TX_FLAGS_TSO 0x00000004
-static inline int
+static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
#ifdef NETIF_F_TSO
@@ -1220,7 +1225,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
return 0;
}
-static inline boolean_t
+static boolean_t
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
struct ixgb_context_desc *context_desc;
@@ -1258,7 +1263,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
#define IXGB_MAX_TXD_PWR 14
#define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR)
-static inline int
+static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
unsigned int first)
{
@@ -1284,6 +1289,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
size,
PCI_DMA_TODEVICE);
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = 0;
len -= size;
offset += size;
@@ -1309,6 +1315,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
size,
PCI_DMA_TODEVICE);
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = 0;
len -= size;
offset += size;
@@ -1323,7 +1330,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
return count;
}
-static inline void
+static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
{
struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
@@ -1395,13 +1402,26 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return 0;
}
+#ifdef NETIF_F_LLTX
+ local_irq_save(flags);
+ if (!spin_trylock(&adapter->tx_lock)) {
+ /* Collision - tell upper layer to requeue */
+ local_irq_restore(flags);
+ return NETDEV_TX_LOCKED;
+ }
+#else
spin_lock_irqsave(&adapter->tx_lock, flags);
+#endif
+
if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
netif_stop_queue(netdev);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
- return 1;
+ return NETDEV_TX_BUSY;
}
+
+#ifndef NETIF_F_LLTX
spin_unlock_irqrestore(&adapter->tx_lock, flags);
+#endif
if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
tx_flags |= IXGB_TX_FLAGS_VLAN;
@@ -1413,10 +1433,13 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tso = ixgb_tso(adapter, skb);
if (tso < 0) {
dev_kfree_skb_any(skb);
+#ifdef NETIF_F_LLTX
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+#endif
return NETDEV_TX_OK;
}
- if (tso)
+ if (likely(tso))
tx_flags |= IXGB_TX_FLAGS_TSO;
else if(ixgb_tx_csum(adapter, skb))
tx_flags |= IXGB_TX_FLAGS_CSUM;
@@ -1426,7 +1449,15 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
netdev->trans_start = jiffies;
- return 0;
+#ifdef NETIF_F_LLTX
+ /* Make sure there is space in the ring for the next send. */
+ if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED))
+ netif_stop_queue(netdev);
+
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+
+#endif
+ return NETDEV_TX_OK;
}
/**
@@ -1448,6 +1479,7 @@ ixgb_tx_timeout_task(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
+ adapter->tx_timeout_count++;
ixgb_down(adapter, TRUE);
ixgb_up(adapter);
}
@@ -1486,28 +1518,15 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
|| (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
- IXGB_ERR("Invalid MTU setting\n");
+ DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
return -EINVAL;
}
- if((max_frame <= IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
- || (max_frame <= IXGB_RXBUFFER_2048)) {
- adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
-
- } else if(max_frame <= IXGB_RXBUFFER_4096) {
- adapter->rx_buffer_len = IXGB_RXBUFFER_4096;
-
- } else if(max_frame <= IXGB_RXBUFFER_8192) {
- adapter->rx_buffer_len = IXGB_RXBUFFER_8192;
-
- } else {
- adapter->rx_buffer_len = IXGB_RXBUFFER_16384;
- }
+ adapter->rx_buffer_len = max_frame;
netdev->mtu = new_mtu;
- if(old_max_frame != max_frame && netif_running(netdev)) {
-
+ if ((old_max_frame != max_frame) && netif_running(netdev)) {
ixgb_down(adapter, TRUE);
ixgb_up(adapter);
}
@@ -1765,23 +1784,43 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
tx_ring->next_to_clean = i;
- spin_lock(&adapter->tx_lock);
- if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
- (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) {
-
- netif_wake_queue(netdev);
+ if (unlikely(netif_queue_stopped(netdev))) {
+ spin_lock(&adapter->tx_lock);
+ if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
+ (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE))
+ netif_wake_queue(netdev);
+ spin_unlock(&adapter->tx_lock);
}
- spin_unlock(&adapter->tx_lock);
if(adapter->detect_tx_hung) {
/* detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i */
adapter->detect_tx_hung = FALSE;
- if(tx_ring->buffer_info[i].dma &&
- time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
+ if (tx_ring->buffer_info[eop].dma &&
+ time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
&& !(IXGB_READ_REG(&adapter->hw, STATUS) &
- IXGB_STATUS_TXOFF))
+ IXGB_STATUS_TXOFF)) {
+ /* detected Tx unit hang */
+ DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
+ " jiffies <%lx>\n"
+ " next_to_watch.status <%x>\n",
+ IXGB_READ_REG(&adapter->hw, TDH),
+ IXGB_READ_REG(&adapter->hw, TDT),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->buffer_info[eop].time_stamp,
+ eop,
+ jiffies,
+ eop_desc->status);
netif_stop_queue(netdev);
+ }
}
return cleaned;
@@ -1794,7 +1833,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
* @sk_buff: socket buffer with received data
**/
-static inline void
+static void
ixgb_rx_checksum(struct ixgb_adapter *adapter,
struct ixgb_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -1858,6 +1897,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
#endif
status = rx_desc->status;
skb = buffer_info->skb;
+ buffer_info->skb = NULL;
prefetch(skb->data);
@@ -1902,6 +1942,26 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
goto rxdesc_done;
}
+ /* code added for copybreak, this should improve
+ * performance for small packets with large amounts
+ * of reassembly being done in the stack */
+#define IXGB_CB_LENGTH 256
+ if (length < IXGB_CB_LENGTH) {
+ struct sk_buff *new_skb =
+ dev_alloc_skb(length + NET_IP_ALIGN);
+ if (new_skb) {
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ new_skb->dev = netdev;
+ memcpy(new_skb->data - NET_IP_ALIGN,
+ skb->data - NET_IP_ALIGN,
+ length + NET_IP_ALIGN);
+ /* save the skb in buffer_info as good */
+ buffer_info->skb = skb;
+ skb = new_skb;
+ }
+ }
+ /* end copybreak code */
+
/* Good Receive */
skb_put(skb, length);
@@ -1931,7 +1991,6 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
rxdesc_done:
/* clean up descriptor, might be written over by hw */
rx_desc->status = 0;
- buffer_info->skb = NULL;
/* use prefetched values */
rx_desc = next_rxd;
@@ -1971,12 +2030,18 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
/* leave three descriptors unused */
while(--cleancount > 2) {
- rx_desc = IXGB_RX_DESC(*rx_ring, i);
-
- skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
+ /* recycle! its good for you */
+ if (!(skb = buffer_info->skb))
+ skb = dev_alloc_skb(adapter->rx_buffer_len
+ + NET_IP_ALIGN);
+ else {
+ skb_trim(skb, 0);
+ goto map_skb;
+ }
- if(unlikely(!skb)) {
+ if (unlikely(!skb)) {
/* Better luck next round */
+ adapter->alloc_rx_buff_failed++;
break;
}
@@ -1990,33 +2055,36 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
- buffer_info->dma =
- pci_map_single(pdev,
- skb->data,
- adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+map_skb:
+ buffer_info->dma = pci_map_single(pdev,
+ skb->data,
+ adapter->rx_buffer_len,
+ PCI_DMA_FROMDEVICE);
+ rx_desc = IXGB_RX_DESC(*rx_ring, i);
rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
/* guarantee DD bit not set now before h/w gets descriptor
* this is the rest of the workaround for h/w double
* writeback. */
rx_desc->status = 0;
- if((i & ~(num_group_tail_writes- 1)) == i) {
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64). */
- wmb();
-
- IXGB_WRITE_REG(&adapter->hw, RDT, i);
- }
if(++i == rx_ring->count) i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
- rx_ring->next_to_use = i;
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
+ if (unlikely(i-- == 0))
+ i = (rx_ring->count - 1);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs, such
+ * as IA-64). */
+ wmb();
+ IXGB_WRITE_REG(&adapter->hw, RDT, i);
+ }
}
/**
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
index dba20481ee80..ee982feac64d 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index 8a83dfdf746d..39fbed29a3df 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -76,7 +76,7 @@ IXGB_PARAM(RxDescriptors, "Number of receive descriptors");
* - 2 - Tx only, generate PAUSE frames but ignore them on receive
* - 3 - Full Flow Control Support
*
- * Default Value: Read flow control settings from the EEPROM
+ * Default Value: 2 - Tx only (silicon bug avoidance)
*/
IXGB_PARAM(FlowControl, "Flow Control setting");
@@ -137,7 +137,7 @@ IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold");
*
* Valid Range: 1 - 65535
*
- * Default Value: 256 (0x100)
+ * Default Value: 65535 (0xffff) (we'll send an xon if we recover)
*/
IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout");
@@ -165,8 +165,6 @@ IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable");
#define XSUMRX_DEFAULT OPTION_ENABLED
-#define FLOW_CONTROL_FULL ixgb_fc_full
-#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
#define DEFAULT_FCRTL 0x28000
#define DEFAULT_FCRTH 0x30000
#define MIN_FCRTL 0
@@ -174,9 +172,9 @@ IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable");
#define MIN_FCRTH 8
#define MAX_FCRTH 0x3FFF0
-#define DEFAULT_FCPAUSE 0x100 /* this may be too long */
#define MIN_FCPAUSE 1
#define MAX_FCPAUSE 0xffff
+#define DEFAULT_FCPAUSE 0xFFFF /* this may be too long */
struct ixgb_option {
enum { enable_option, range_option, list_option } type;
@@ -336,7 +334,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.type = list_option,
.name = "Flow Control",
.err = "reading default settings from EEPROM",
- .def = ixgb_fc_full,
+ .def = ixgb_fc_tx_pause,
.arg = { .l = { .nr = LIST_LEN(fc_list),
.p = fc_list }}
};
@@ -365,8 +363,8 @@ ixgb_check_options(struct ixgb_adapter *adapter)
} else {
adapter->hw.fc.high_water = opt.def;
}
- if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
- printk (KERN_INFO
+ if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
+ printk (KERN_INFO
"Ignoring RxFCHighThresh when no RxFC\n");
}
{ /* Receive Flow Control Low Threshold */
@@ -385,8 +383,8 @@ ixgb_check_options(struct ixgb_adapter *adapter)
} else {
adapter->hw.fc.low_water = opt.def;
}
- if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
- printk (KERN_INFO
+ if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
+ printk (KERN_INFO
"Ignoring RxFCLowThresh when no RxFC\n");
}
{ /* Flow Control Pause Time Request*/
@@ -406,12 +404,12 @@ ixgb_check_options(struct ixgb_adapter *adapter)
} else {
adapter->hw.fc.pause_time = opt.def;
}
- if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
- printk (KERN_INFO
+ if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
+ printk (KERN_INFO
"Ignoring FCReqTimeout when no RxFC\n");
}
/* high low and spacing check for rx flow control thresholds */
- if (adapter->hw.fc.type & ixgb_fc_rx_pause) {
+ if (adapter->hw.fc.type & ixgb_fc_tx_pause) {
/* high must be greater than low */
if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
/* set defaults */
diff --git a/drivers/net/myri10ge/Makefile b/drivers/net/myri10ge/Makefile
new file mode 100644
index 000000000000..5df891647aee
--- /dev/null
+++ b/drivers/net/myri10ge/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Myricom Myri-10G ethernet driver
+#
+
+obj-$(CONFIG_MYRI10GE) += myri10ge.o
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
new file mode 100644
index 000000000000..e1feb58bd661
--- /dev/null
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -0,0 +1,2869 @@
+/*************************************************************************
+ * myri10ge.c: Myricom Myri-10G Ethernet driver.
+ *
+ * Copyright (C) 2005, 2006 Myricom, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Myricom, Inc. nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *
+ * If the eeprom on your board is not recent enough, you will need to get a
+ * newer firmware image at:
+ * http://www.myri.com/scs/download-Myri10GE.html
+ *
+ * Contact Information:
+ * <help@myri.com>
+ * Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
+ *************************************************************************/
+
+#include <linux/tcp.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/inet.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/timer.h>
+#include <linux/vmalloc.h>
+#include <linux/crc32.h>
+#include <linux/moduleparam.h>
+#include <linux/io.h>
+#include <net/checksum.h>
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#ifdef CONFIG_MTRR
+#include <asm/mtrr.h>
+#endif
+
+#include "myri10ge_mcp.h"
+#include "myri10ge_mcp_gen_header.h"
+
+#define MYRI10GE_VERSION_STR "1.0.0"
+
+MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
+MODULE_AUTHOR("Maintainer: help@myri.com");
+MODULE_VERSION(MYRI10GE_VERSION_STR);
+MODULE_LICENSE("Dual BSD/GPL");
+
+#define MYRI10GE_MAX_ETHER_MTU 9014
+
+#define MYRI10GE_ETH_STOPPED 0
+#define MYRI10GE_ETH_STOPPING 1
+#define MYRI10GE_ETH_STARTING 2
+#define MYRI10GE_ETH_RUNNING 3
+#define MYRI10GE_ETH_OPEN_FAILED 4
+
+#define MYRI10GE_EEPROM_STRINGS_SIZE 256
+#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)
+
+#define MYRI10GE_NO_CONFIRM_DATA 0xffffffff
+#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff
+
+struct myri10ge_rx_buffer_state {
+ struct sk_buff *skb;
+ DECLARE_PCI_UNMAP_ADDR(bus)
+ DECLARE_PCI_UNMAP_LEN(len)
+};
+
+struct myri10ge_tx_buffer_state {
+ struct sk_buff *skb;
+ int last;
+ DECLARE_PCI_UNMAP_ADDR(bus)
+ DECLARE_PCI_UNMAP_LEN(len)
+};
+
+struct myri10ge_cmd {
+ u32 data0;
+ u32 data1;
+ u32 data2;
+};
+
+struct myri10ge_rx_buf {
+ struct mcp_kreq_ether_recv __iomem *lanai; /* lanai ptr for recv ring */
+ u8 __iomem *wc_fifo; /* w/c rx dma addr fifo address */
+ struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */
+ struct myri10ge_rx_buffer_state *info;
+ int cnt;
+ int alloc_fail;
+ int mask; /* number of rx slots -1 */
+};
+
+struct myri10ge_tx_buf {
+ struct mcp_kreq_ether_send __iomem *lanai; /* lanai ptr for sendq */
+ u8 __iomem *wc_fifo; /* w/c send fifo address */
+ struct mcp_kreq_ether_send *req_list; /* host shadow of sendq */
+ char *req_bytes;
+ struct myri10ge_tx_buffer_state *info;
+ int mask; /* number of transmit slots -1 */
+ int boundary; /* boundary transmits cannot cross */
+ int req ____cacheline_aligned; /* transmit slots submitted */
+ int pkt_start; /* packets started */
+ int done ____cacheline_aligned; /* transmit slots completed */
+ int pkt_done; /* packets completed */
+};
+
+struct myri10ge_rx_done {
+ struct mcp_slot *entry;
+ dma_addr_t bus;
+ int cnt;
+ int idx;
+};
+
+struct myri10ge_priv {
+ int running; /* running? */
+ int csum_flag; /* rx_csums? */
+ struct myri10ge_tx_buf tx; /* transmit ring */
+ struct myri10ge_rx_buf rx_small;
+ struct myri10ge_rx_buf rx_big;
+ struct myri10ge_rx_done rx_done;
+ int small_bytes;
+ struct net_device *dev;
+ struct net_device_stats stats;
+ u8 __iomem *sram;
+ int sram_size;
+ unsigned long board_span;
+ unsigned long iomem_base;
+ u32 __iomem *irq_claim;
+ u32 __iomem *irq_deassert;
+ char *mac_addr_string;
+ struct mcp_cmd_response *cmd;
+ dma_addr_t cmd_bus;
+ struct mcp_irq_data *fw_stats;
+ dma_addr_t fw_stats_bus;
+ struct pci_dev *pdev;
+ int msi_enabled;
+ unsigned int link_state;
+ unsigned int rdma_tags_available;
+ int intr_coal_delay;
+ u32 __iomem *intr_coal_delay_ptr;
+ int mtrr;
+ int wake_queue;
+ int stop_queue;
+ int down_cnt;
+ wait_queue_head_t down_wq;
+ struct work_struct watchdog_work;
+ struct timer_list watchdog_timer;
+ int watchdog_tx_done;
+ int watchdog_resets;
+ int tx_linearized;
+ int pause;
+ char *fw_name;
+ char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
+ char fw_version[128];
+ u8 mac_addr[6]; /* eeprom mac address */
+ unsigned long serial_number;
+ int vendor_specific_offset;
+ u32 devctl;
+ u16 msi_flags;
+ u32 pm_state[16];
+ u32 read_dma;
+ u32 write_dma;
+ u32 read_write_dma;
+};
+
+static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
+static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
+
+static char *myri10ge_fw_name = NULL;
+module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name\n");
+
+static int myri10ge_ecrc_enable = 1;
+module_param(myri10ge_ecrc_enable, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E\n");
+
+static int myri10ge_max_intr_slots = 1024;
+module_param(myri10ge_max_intr_slots, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_max_intr_slots, "Interrupt queue slots\n");
+
+static int myri10ge_small_bytes = -1; /* -1 == auto */
+module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets\n");
+
+static int myri10ge_msi = 1; /* enable msi by default */
+module_param(myri10ge_msi, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts\n");
+
+static int myri10ge_intr_coal_delay = 25;
+module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay\n");
+
+static int myri10ge_flow_control = 1;
+module_param(myri10ge_flow_control, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter\n");
+
+static int myri10ge_deassert_wait = 1;
+module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_deassert_wait,
+ "Wait when deasserting legacy interrupts\n");
+
+static int myri10ge_force_firmware = 0;
+module_param(myri10ge_force_firmware, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_force_firmware,
+ "Force firmware to assume aligned completions\n");
+
+static int myri10ge_skb_cross_4k = 0;
+module_param(myri10ge_skb_cross_4k, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_skb_cross_4k,
+ "Can a small skb cross a 4KB boundary?\n");
+
+static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
+module_param(myri10ge_initial_mtu, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU\n");
+
+static int myri10ge_napi_weight = 64;
+module_param(myri10ge_napi_weight, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight\n");
+
+static int myri10ge_watchdog_timeout = 1;
+module_param(myri10ge_watchdog_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout\n");
+
+static int myri10ge_max_irq_loops = 1048576;
+module_param(myri10ge_max_irq_loops, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_max_irq_loops,
+ "Set stuck legacy IRQ detection threshold\n");
+
+#define MYRI10GE_FW_OFFSET 1024*1024
+#define MYRI10GE_HIGHPART_TO_U32(X) \
+(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
+#define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X))
+
+#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
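[Editor's note: a hedged userspace illustration of the split performed by the two address macros above. The high half only matters when the DMA address type is 8 bytes wide; on 32-bit builds it is forced to zero so the code never shifts a 32-bit value by 32. The sample address is made up.]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t bus = 0x0000000123456789ULL;   /* made-up DMA address */
        uint32_t high = (sizeof(bus) == 8) ? (uint32_t)(bus >> 32) : 0;
        uint32_t low  = (uint32_t)bus;

        printf("high=0x%08x low=0x%08x\n",
               (unsigned)high, (unsigned)low);  /* 0x00000001 0x23456789 */
        return 0;
}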
+
+static int
+myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
+ struct myri10ge_cmd *data, int atomic)
+{
+ struct mcp_cmd *buf;
+ char buf_bytes[sizeof(*buf) + 8];
+ struct mcp_cmd_response *response = mgp->cmd;
+ char __iomem *cmd_addr = mgp->sram + MXGEFW_CMD_OFFSET;
+ u32 dma_low, dma_high, result, value;
+ int sleep_total = 0;
+
+ /* ensure buf is aligned to 8 bytes */
+ buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8);
+
+ buf->data0 = htonl(data->data0);
+ buf->data1 = htonl(data->data1);
+ buf->data2 = htonl(data->data2);
+ buf->cmd = htonl(cmd);
+ dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
+ dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
+
+ buf->response_addr.low = htonl(dma_low);
+ buf->response_addr.high = htonl(dma_high);
+ response->result = MYRI10GE_NO_RESPONSE_RESULT;
+ mb();
+ myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));
+
+ /* wait up to 15ms. Longest command is the DMA benchmark,
+ * which is capped at 5ms, but runs from a timeout handler
+ * that runs every 7.8ms. So a 15ms timeout leaves us with
+ * a 2.2ms margin
+ */
+ if (atomic) {
+ /* if atomic is set, do not sleep,
+ * and try to get the completion quickly
+ * (1ms will be enough for those commands) */
+ for (sleep_total = 0;
+ sleep_total < 1000
+ && response->result == MYRI10GE_NO_RESPONSE_RESULT;
+ sleep_total += 10)
+ udelay(10);
+ } else {
+ /* use msleep for most commands */
+ for (sleep_total = 0;
+ sleep_total < 15
+ && response->result == MYRI10GE_NO_RESPONSE_RESULT;
+ sleep_total++)
+ msleep(1);
+ }
+
+ result = ntohl(response->result);
+ value = ntohl(response->data);
+ if (result != MYRI10GE_NO_RESPONSE_RESULT) {
+ if (result == 0) {
+ data->data0 = value;
+ return 0;
+ } else {
+ dev_err(&mgp->pdev->dev,
+ "command %d failed, result = %d\n",
+ cmd, result);
+ return -ENXIO;
+ }
+ }
+
+ dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n",
+ cmd, result);
+ return -EAGAIN;
+}
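[Editor's note: myri10ge_send_cmd() above carves an 8-byte-aligned struct mcp_cmd out of an oversized stack array before PIO-copying it to the NIC. A hedged userspace illustration of that alignment trick; mcp_cmd_example is a cut-down stand-in (just the four 32-bit fields assigned above) and align8() is a local equivalent of the kernel's ALIGN(x, 8).]

#include <stdint.h>
#include <stdio.h>

struct mcp_cmd_example {                /* stand-in, not the full struct mcp_cmd */
        uint32_t data0, data1, data2, cmd;
};

static uintptr_t align8(uintptr_t x)    /* equivalent of ALIGN(x, 8) */
{
        return (x + 7) & ~(uintptr_t)7;
}

int main(void)
{
        char buf_bytes[sizeof(struct mcp_cmd_example) + 8];
        struct mcp_cmd_example *buf =
                (struct mcp_cmd_example *)align8((uintptr_t)buf_bytes);

        printf("aligned: %d\n", (int)(((uintptr_t)buf & 7) == 0));     /* always 1 */
        return 0;
}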
+
+/*
+ * The eeprom strings on the lanaiX have the format
+ * SN=x\0
+ * MAC=x:x:x:x:x:x\0
+ * PT:ddd mmm xx xx:xx:xx xx\0
+ * PV:ddd mmm xx xx:xx:xx xx\0
+ */
+static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
+{
+ char *ptr, *limit;
+ int i;
+
+ ptr = mgp->eeprom_strings;
+ limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;
+
+ while (*ptr != '\0' && ptr < limit) {
+ if (memcmp(ptr, "MAC=", 4) == 0) {
+ ptr += 4;
+ mgp->mac_addr_string = ptr;
+ for (i = 0; i < 6; i++) {
+ if ((ptr + 2) > limit)
+ goto abort;
+ mgp->mac_addr[i] =
+ simple_strtoul(ptr, &ptr, 16);
+ ptr += 1;
+ }
+ }
+ if (memcmp((const void *)ptr, "SN=", 3) == 0) {
+ ptr += 3;
+ mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
+ }
+ while (ptr < limit && *ptr++) ;
+ }
+
+ return 0;
+
+abort:
+ dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n");
+ return -ENXIO;
+}
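[Editor's note: a hedged userspace sketch of the MAC= parsing loop above, run against a synthetic EEPROM string in the documented "KEY=value\0...\0" layout; it parses only the MAC field and uses strtoul in place of simple_strtoul.]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char strings[] = "SN=123456\0MAC=00:60:dd:47:ab:cd\0\0";
        char *ptr = strings;
        char *limit = strings + sizeof(strings);
        unsigned char mac[6];
        int i;

        while (ptr < limit && *ptr != '\0') {
                if (strncmp(ptr, "MAC=", 4) == 0) {
                        ptr += 4;
                        for (i = 0; i < 6; i++) {
                                mac[i] = (unsigned char)strtoul(ptr, &ptr, 16);
                                ptr++;          /* step over ':' (or the final NUL) */
                        }
                        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
                        break;                  /* only the MAC field matters here */
                }
                ptr += strlen(ptr) + 1;         /* next NUL-terminated field */
        }
        return 0;
}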
+
+/*
+ * Enable or disable periodic RDMAs from the host to make certain
+ * chipsets resend dropped PCIe messages
+ */
+
+static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
+{
+ char __iomem *submit;
+ u32 buf[16];
+ u32 dma_low, dma_high;
+ int i;
+
+ /* clear confirmation addr */
+ mgp->cmd->data = 0;
+ mb();
+
+ /* send a rdma command to the PCIe engine, and wait for the
+ * response in the confirmation address. The firmware should
+ * write a -1 there to indicate it is alive and well
+ */
+ dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
+ dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
+
+ buf[0] = htonl(dma_high); /* confirm addr MSW */
+ buf[1] = htonl(dma_low); /* confirm addr LSW */
+ buf[2] = htonl(MYRI10GE_NO_CONFIRM_DATA); /* confirm data */
+ buf[3] = htonl(dma_high); /* dummy addr MSW */
+ buf[4] = htonl(dma_low); /* dummy addr LSW */
+ buf[5] = htonl(enable); /* enable? */
+
+ submit = mgp->sram + 0xfc01c0;
+
+ myri10ge_pio_copy(submit, &buf, sizeof(buf));
+ for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
+ msleep(1);
+ if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA)
+ dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n",
+ (enable ? "enable" : "disable"));
+}
+
+static int
+myri10ge_validate_firmware(struct myri10ge_priv *mgp,
+ struct mcp_gen_header *hdr)
+{
+ struct device *dev = &mgp->pdev->dev;
+ int major, minor;
+
+ /* check firmware type */
+ if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
+ dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type));
+ return -EINVAL;
+ }
+
+ /* save firmware version for ethtool */
+ strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));
+
+ sscanf(mgp->fw_version, "%d.%d", &major, &minor);
+
+ if (!(major == MXGEFW_VERSION_MAJOR && minor == MXGEFW_VERSION_MINOR)) {
+ dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
+ dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
+ MXGEFW_VERSION_MINOR);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
+{
+ unsigned crc, reread_crc;
+ const struct firmware *fw;
+ struct device *dev = &mgp->pdev->dev;
+ struct mcp_gen_header *hdr;
+ size_t hdr_offset;
+ int status;
+
+ if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
+ dev_err(dev, "Unable to load %s firmware image via hotplug\n",
+ mgp->fw_name);
+ status = -EINVAL;
+ goto abort_with_nothing;
+ }
+
+ /* check size */
+
+ if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET ||
+ fw->size < MCP_HEADER_PTR_OFFSET + 4) {
+ dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size);
+ status = -EINVAL;
+ goto abort_with_fw;
+ }
+
+ /* check id */
+ hdr_offset = ntohl(*(u32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
+ if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
+ dev_err(dev, "Bad firmware file\n");
+ status = -EINVAL;
+ goto abort_with_fw;
+ }
+ hdr = (void *)(fw->data + hdr_offset);
+
+ status = myri10ge_validate_firmware(mgp, hdr);
+ if (status != 0)
+ goto abort_with_fw;
+
+ crc = crc32(~0, fw->data, fw->size);
+ if (mgp->tx.boundary == 2048) {
+ /* Avoid PCI burst on chipset with unaligned completions. */
+ int i;
+ __iomem u32 *ptr = (__iomem u32 *) (mgp->sram +
+ MYRI10GE_FW_OFFSET);
+ for (i = 0; i < fw->size / 4; i++) {
+ __raw_writel(((u32 *) fw->data)[i], ptr + i);
+ wmb();
+ }
+ } else {
+ myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET, fw->data,
+ fw->size);
+ }
+ /* corruption checking is good for parity recovery and buggy chipsets */
+ memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
+ reread_crc = crc32(~0, fw->data, fw->size);
+ if (crc != reread_crc) {
+ dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n",
+ (unsigned)fw->size, reread_crc, crc);
+ status = -EIO;
+ goto abort_with_fw;
+ }
+ *size = (u32) fw->size;
+
+abort_with_fw:
+ release_firmware(fw);
+
+abort_with_nothing:
+ return status;
+}
+
+static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
+{
+ struct mcp_gen_header *hdr;
+ struct device *dev = &mgp->pdev->dev;
+ const size_t bytes = sizeof(struct mcp_gen_header);
+ size_t hdr_offset;
+ int status;
+
+ /* find running firmware header */
+ hdr_offset = ntohl(__raw_readl(mgp->sram + MCP_HEADER_PTR_OFFSET));
+
+ if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) {
+ dev_err(dev, "Running firmware has bad header offset (%d)\n",
+ (int)hdr_offset);
+ return -EIO;
+ }
+
+ /* copy header of running firmware from SRAM to host memory to
+ * validate firmware */
+ hdr = kmalloc(bytes, GFP_KERNEL);
+ if (hdr == NULL) {
+ dev_err(dev, "could not malloc firmware hdr\n");
+ return -ENOMEM;
+ }
+ memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
+ status = myri10ge_validate_firmware(mgp, hdr);
+ kfree(hdr);
+ return status;
+}
+
+static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
+{
+ char __iomem *submit;
+ u32 buf[16];
+ u32 dma_low, dma_high, size;
+ int status, i;
+
+ size = 0;
+ status = myri10ge_load_hotplug_firmware(mgp, &size);
+ if (status) {
+ dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");
+
+ /* Do not attempt to adopt firmware if there
+ * was a bad crc */
+ if (status == -EIO)
+ return status;
+
+ status = myri10ge_adopt_running_firmware(mgp);
+ if (status != 0) {
+ dev_err(&mgp->pdev->dev,
+ "failed to adopt running firmware\n");
+ return status;
+ }
+ dev_info(&mgp->pdev->dev,
+ "Successfully adopted running firmware\n");
+ if (mgp->tx.boundary == 4096) {
+ dev_warn(&mgp->pdev->dev,
+ "Using firmware currently running on NIC"
+ ". For optimal\n");
+ dev_warn(&mgp->pdev->dev,
+ "performance consider loading optimized "
+ "firmware\n");
+ dev_warn(&mgp->pdev->dev, "via hotplug\n");
+ }
+
+ mgp->fw_name = "adopted";
+ mgp->tx.boundary = 2048;
+ return status;
+ }
+
+ /* clear confirmation addr */
+ mgp->cmd->data = 0;
+ mb();
+
+ /* send a reload command to the bootstrap MCP, and wait for the
+ * response in the confirmation address. The firmware should
+ * write a -1 there to indicate it is alive and well
+ */
+ dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
+ dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
+
+ buf[0] = htonl(dma_high); /* confirm addr MSW */
+ buf[1] = htonl(dma_low); /* confirm addr LSW */
+ buf[2] = htonl(MYRI10GE_NO_CONFIRM_DATA); /* confirm data */
+
+ /* FIX: All newest firmware should un-protect the bottom of
+ * the sram before handoff. However, the very first interfaces
+ * do not. Therefore the handoff copy must skip the first 8 bytes
+ */
+ buf[3] = htonl(MYRI10GE_FW_OFFSET + 8); /* where the code starts */
+ buf[4] = htonl(size - 8); /* length of code */
+ buf[5] = htonl(8); /* where to copy to */
+ buf[6] = htonl(0); /* where to jump to */
+
+ submit = mgp->sram + 0xfc0000;
+
+ myri10ge_pio_copy(submit, &buf, sizeof(buf));
+ mb();
+ msleep(1);
+ mb();
+ i = 0;
+ while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20) {
+ msleep(1);
+ i++;
+ }
+ if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
+ dev_err(&mgp->pdev->dev, "handoff failed\n");
+ return -ENXIO;
+ }
+ dev_info(&mgp->pdev->dev, "handoff confirmed\n");
+ myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096);
+
+ return 0;
+}
+
+static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
+{
+ struct myri10ge_cmd cmd;
+ int status;
+
+ cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
+ | (addr[2] << 8) | addr[3]);
+
+ cmd.data1 = ((addr[4] << 8) | (addr[5]));
+
+ status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
+ return status;
+}
+
+static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
+{
+ struct myri10ge_cmd cmd;
+ int status, ctl;
+
+ ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
+ status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);
+
+ if (status) {
+ printk(KERN_ERR
+ "myri10ge: %s: Failed to set flow control mode\n",
+ mgp->dev->name);
+ return status;
+ }
+ mgp->pause = pause;
+ return 0;
+}
+
+static void
+myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
+{
+ struct myri10ge_cmd cmd;
+ int status, ctl;
+
+ ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
+ status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
+ if (status)
+ printk(KERN_ERR "myri10ge: %s: Failed to set promisc mode\n",
+ mgp->dev->name);
+}
+
+static int myri10ge_reset(struct myri10ge_priv *mgp)
+{
+ struct myri10ge_cmd cmd;
+ int status;
+ size_t bytes;
+ u32 len;
+
+ /* try to send a reset command to the card to see if it
+ * is alive */
+ memset(&cmd, 0, sizeof(cmd));
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
+ if (status != 0) {
+ dev_err(&mgp->pdev->dev, "failed reset\n");
+ return -ENXIO;
+ }
+
+ /* Now exchange information about interrupts */
+
+ bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
+ memset(mgp->rx_done.entry, 0, bytes);
+ cmd.data0 = (u32) bytes;
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus);
+ status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0);
+
+ status |=
+ myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
+ mgp->irq_claim = (__iomem u32 *) (mgp->sram + cmd.data0);
+ if (!mgp->msi_enabled) {
+ status |= myri10ge_send_cmd
+ (mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, &cmd, 0);
+ mgp->irq_deassert = (__iomem u32 *) (mgp->sram + cmd.data0);
+
+ }
+ status |= myri10ge_send_cmd
+ (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
+ mgp->intr_coal_delay_ptr = (__iomem u32 *) (mgp->sram + cmd.data0);
+ if (status != 0) {
+ dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
+ return status;
+ }
+ __raw_writel(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
+
+ /* Run a small DMA test.
+ * The magic multipliers to the length tell the firmware
+ * to do DMA read, write, or read+write tests. The
+ * results are returned in cmd.data0. The upper 16
+ * bits or the return is the number of transfers completed.
+ * The lower 16 bits is the time in 0.5us ticks that the
+ * transfers took to complete.
+ */
+
+ len = mgp->tx.boundary;
+
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus);
+ cmd.data2 = len * 0x10000;
+ status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0);
+ if (status == 0)
+ mgp->read_dma = ((cmd.data0 >> 16) * len * 2) /
+ (cmd.data0 & 0xffff);
+ else
+ dev_warn(&mgp->pdev->dev, "DMA read benchmark failed: %d\n",
+ status);
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus);
+ cmd.data2 = len * 0x1;
+ status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0);
+ if (status == 0)
+ mgp->write_dma = ((cmd.data0 >> 16) * len * 2) /
+ (cmd.data0 & 0xffff);
+ else
+ dev_warn(&mgp->pdev->dev, "DMA write benchmark failed: %d\n",
+ status);
+
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus);
+ cmd.data2 = len * 0x10001;
+ status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0);
+ if (status == 0)
+ mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) /
+ (cmd.data0 & 0xffff);
+ else
+ dev_warn(&mgp->pdev->dev,
+ "DMA read/write benchmark failed: %d\n", status);
+
+ memset(mgp->rx_done.entry, 0, bytes);
+
+ /* reset mcp/driver shared state back to 0 */
+ mgp->tx.req = 0;
+ mgp->tx.done = 0;
+ mgp->tx.pkt_start = 0;
+ mgp->tx.pkt_done = 0;
+ mgp->rx_big.cnt = 0;
+ mgp->rx_small.cnt = 0;
+ mgp->rx_done.idx = 0;
+ mgp->rx_done.cnt = 0;
+ status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
+ myri10ge_change_promisc(mgp, 0, 0);
+ myri10ge_change_pause(mgp, mgp->pause);
+ return status;
+}
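[Editor's note: a hedged worked example of the DMA-benchmark arithmetic above. Per the comment, the upper 16 bits of cmd.data0 hold the number of transfers and the lower 16 bits the elapsed time in 0.5 us ticks, so transfers * len * 2 / ticks is bytes per microsecond, i.e. MB/s. The sample numbers are made up.]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t data0 = (1000u << 16) | 2000u; /* 1000 transfers in 1000 us */
        uint32_t len = 2048;                    /* mgp->tx.boundary */
        uint32_t mbs = ((data0 >> 16) * len * 2) / (data0 & 0xffff);

        printf("read DMA bandwidth ~ %u MB/s\n", (unsigned)mbs);       /* 2048 */
        return 0;
}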
+
+static inline void
+myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
+ struct mcp_kreq_ether_recv *src)
+{
+ u32 low;
+
+ low = src->addr_low;
+ src->addr_low = DMA_32BIT_MASK;
+ myri10ge_pio_copy(dst, src, 8 * sizeof(*src));
+ mb();
+ src->addr_low = low;
+ __raw_writel(low, &dst->addr_low);
+ mb();
+}
+
+/*
+ * Set of routines to get a new receive buffer. Any buffer which
+ * crosses a 4KB boundary must start on a 4KB boundary due to PCIe
+ * wdma restrictions. We also try to align any smaller allocation to
+ * at least a 16 byte boundary for efficiency. We assume the linux
+ * memory allocator works by powers of 2, and will not return memory
+ * smaller than 2KB which crosses a 4KB boundary. If it does, we fall
+ * back to allocating 2x as much space as required.
+ *
+ * We intend to replace large (>4KB) skb allocations by using
+ * pages directly and building a fraglist in the near future.
+ */
+
+static inline struct sk_buff *myri10ge_alloc_big(int bytes)
+{
+ struct sk_buff *skb;
+ unsigned long data, roundup;
+
+ skb = dev_alloc_skb(bytes + 4096 + MXGEFW_PAD);
+ if (skb == NULL)
+ return NULL;
+
+ /* Correct skb->truesize so that socket buffer
+ * accounting is not confused by the rounding we must
+ * do to satisfy alignment constraints.
+ */
+ skb->truesize -= 4096;
+
+ data = (unsigned long)(skb->data);
+ roundup = (-data) & (4095);
+ skb_reserve(skb, roundup);
+ return skb;
+}
+
+/* Allocate 2x as much space as required and use whichever portion
+ * does not cross a 4KB boundary */
+static inline struct sk_buff *myri10ge_alloc_small_safe(unsigned int bytes)
+{
+ struct sk_buff *skb;
+ unsigned long data, boundary;
+
+ skb = dev_alloc_skb(2 * (bytes + MXGEFW_PAD) - 1);
+ if (unlikely(skb == NULL))
+ return NULL;
+
+ /* Correct skb->truesize so that socket buffer
+ * accounting is not confused by the rounding we must
+ * do to satisfy alignment constraints.
+ */
+ skb->truesize -= bytes + MXGEFW_PAD;
+
+ data = (unsigned long)(skb->data);
+ boundary = (data + 4095UL) & ~4095UL;
+ if ((boundary - data) >= (bytes + MXGEFW_PAD))
+ return skb;
+
+ skb_reserve(skb, boundary - data);
+ return skb;
+}
+
+/* Allocate just enough space, and verify that the allocated
+ * space does not cross a 4KB boundary */
+static inline struct sk_buff *myri10ge_alloc_small(int bytes)
+{
+ struct sk_buff *skb;
+ unsigned long roundup, data, end;
+
+ skb = dev_alloc_skb(bytes + 16 + MXGEFW_PAD);
+ if (unlikely(skb == NULL))
+ return NULL;
+
+ /* Round allocated buffer to 16 byte boundary */
+ data = (unsigned long)(skb->data);
+ roundup = (-data) & 15UL;
+ skb_reserve(skb, roundup);
+ /* Verify that the data buffer does not cross a page boundary */
+ data = (unsigned long)(skb->data);
+ end = data + bytes + MXGEFW_PAD - 1;
+ if (unlikely(((end >> 12) != (data >> 12)) && (data & 4095UL))) {
+ printk(KERN_NOTICE
+ "myri10ge_alloc_small: small skb crossed 4KB boundary\n");
+ myri10ge_skb_cross_4k = 1;
+ dev_kfree_skb_any(skb);
+ skb = myri10ge_alloc_small_safe(bytes);
+ }
+ return skb;
+}
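[Editor's note: a hedged standalone version of the 4KB-crossing test used in myri10ge_alloc_small() above. A buffer violates the PCIe wdma rule when it spills into another 4KB page and does not itself start on a 4KB boundary; 'bytes' here is assumed to already include any firmware pad.]

#include <stdint.h>
#include <stdio.h>

static int crosses_4k_unaligned(uintptr_t data, unsigned int bytes)
{
        uintptr_t end = data + bytes - 1;

        return ((end >> 12) != (data >> 12)) && (data & 4095u) != 0;
}

int main(void)
{
        printf("%d %d %d\n",
               crosses_4k_unaligned(0x1f80, 128),      /* 0: stays within one page */
               crosses_4k_unaligned(0x1fc0, 128),      /* 1: crosses, unaligned start */
               crosses_4k_unaligned(0x2000, 8192));    /* 0: crosses but 4KB-aligned */
        return 0;
}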
+
+static inline int
+myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct pci_dev *pdev, int bytes,
+ int idx)
+{
+ struct sk_buff *skb;
+ dma_addr_t bus;
+ int len, retval = 0;
+
+ bytes += VLAN_HLEN; /* account for 802.1q vlan tag */
+
+ if ((bytes + MXGEFW_PAD) > (4096 - 16) /* linux overhead */ )
+ skb = myri10ge_alloc_big(bytes);
+ else if (myri10ge_skb_cross_4k)
+ skb = myri10ge_alloc_small_safe(bytes);
+ else
+ skb = myri10ge_alloc_small(bytes);
+
+ if (unlikely(skb == NULL)) {
+ rx->alloc_fail++;
+ retval = -ENOBUFS;
+ goto done;
+ }
+
+ /* set len so that it only covers the area we
+ * need mapped for DMA */
+ len = bytes + MXGEFW_PAD;
+
+ bus = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
+ rx->info[idx].skb = skb;
+ pci_unmap_addr_set(&rx->info[idx], bus, bus);
+ pci_unmap_len_set(&rx->info[idx], len, len);
+ rx->shadow[idx].addr_low = htonl(MYRI10GE_LOWPART_TO_U32(bus));
+ rx->shadow[idx].addr_high = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
+
+done:
+ /* copy 8 descriptors (64-bytes) to the mcp at a time */
+ if ((idx & 7) == 7) {
+ if (rx->wc_fifo == NULL)
+ myri10ge_submit_8rx(&rx->lanai[idx - 7],
+ &rx->shadow[idx - 7]);
+ else {
+ mb();
+ myri10ge_pio_copy(rx->wc_fifo,
+ &rx->shadow[idx - 7], 64);
+ }
+ }
+ return retval;
+}
+
+static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, u16 hw_csum)
+{
+ struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
+
+ if ((skb->protocol == ntohs(ETH_P_8021Q)) &&
+ (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
+ vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
+ skb->csum = hw_csum;
+ skb->ip_summed = CHECKSUM_HW;
+ }
+}
+
+static inline unsigned long
+myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
+ int bytes, int len, int csum)
+{
+ dma_addr_t bus;
+ struct sk_buff *skb;
+ int idx, unmap_len;
+
+ idx = rx->cnt & rx->mask;
+ rx->cnt++;
+
+ /* save a pointer to the received skb */
+ skb = rx->info[idx].skb;
+ bus = pci_unmap_addr(&rx->info[idx], bus);
+ unmap_len = pci_unmap_len(&rx->info[idx], len);
+
+ /* try to replace the received skb */
+ if (myri10ge_getbuf(rx, mgp->pdev, bytes, idx)) {
+ /* drop the frame -- the old skbuf is re-cycled */
+ mgp->stats.rx_dropped += 1;
+ return 0;
+ }
+
+ /* unmap the recvd skb */
+ pci_unmap_single(mgp->pdev, bus, unmap_len, PCI_DMA_FROMDEVICE);
+
+ /* the mcp implicitly skips the first MXGEFW_PAD bytes so that the
+ * packet is properly aligned */
+ skb_reserve(skb, MXGEFW_PAD);
+
+ /* set the length of the frame */
+ skb_put(skb, len);
+
+ skb->protocol = eth_type_trans(skb, mgp->dev);
+ skb->dev = mgp->dev;
+ if (mgp->csum_flag) {
+ if ((skb->protocol == ntohs(ETH_P_IP)) ||
+ (skb->protocol == ntohs(ETH_P_IPV6))) {
+ skb->csum = ntohs((u16) csum);
+ skb->ip_summed = CHECKSUM_HW;
+ } else
+ myri10ge_vlan_ip_csum(skb, ntohs((u16) csum));
+ }
+
+ netif_receive_skb(skb);
+ mgp->dev->last_rx = jiffies;
+ return 1;
+}
+
+static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index)
+{
+ struct pci_dev *pdev = mgp->pdev;
+ struct myri10ge_tx_buf *tx = &mgp->tx;
+ struct sk_buff *skb;
+ int idx, len;
+ int limit = 0;
+
+ while (tx->pkt_done != mcp_index) {
+ idx = tx->done & tx->mask;
+ skb = tx->info[idx].skb;
+
+ /* Mark as free */
+ tx->info[idx].skb = NULL;
+ if (tx->info[idx].last) {
+ tx->pkt_done++;
+ tx->info[idx].last = 0;
+ }
+ tx->done++;
+ len = pci_unmap_len(&tx->info[idx], len);
+ pci_unmap_len_set(&tx->info[idx], len, 0);
+ if (skb) {
+ mgp->stats.tx_bytes += skb->len;
+ mgp->stats.tx_packets++;
+ dev_kfree_skb_irq(skb);
+ if (len)
+ pci_unmap_single(pdev,
+ pci_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ } else {
+ if (len)
+ pci_unmap_page(pdev,
+ pci_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ }
+
+ /* limit potential for livelock by only handling
+ * 2 full tx rings per call */
+ if (unlikely(++limit > 2 * tx->mask))
+ break;
+ }
+ /* start the queue if we've stopped it */
+ if (netif_queue_stopped(mgp->dev)
+ && tx->req - tx->done < (tx->mask >> 1)) {
+ mgp->wake_queue++;
+ netif_wake_queue(mgp->dev);
+ }
+}
+
+static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit)
+{
+ struct myri10ge_rx_done *rx_done = &mgp->rx_done;
+ unsigned long rx_bytes = 0;
+ unsigned long rx_packets = 0;
+ unsigned long rx_ok;
+
+ int idx = rx_done->idx;
+ int cnt = rx_done->cnt;
+ u16 length;
+ u16 checksum;
+
+ while (rx_done->entry[idx].length != 0 && *limit != 0) {
+ length = ntohs(rx_done->entry[idx].length);
+ rx_done->entry[idx].length = 0;
+ checksum = ntohs(rx_done->entry[idx].checksum);
+ if (length <= mgp->small_bytes)
+ rx_ok = myri10ge_rx_done(mgp, &mgp->rx_small,
+ mgp->small_bytes,
+ length, checksum);
+ else
+ rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big,
+ mgp->dev->mtu + ETH_HLEN,
+ length, checksum);
+ rx_packets += rx_ok;
+ rx_bytes += rx_ok * (unsigned long)length;
+ cnt++;
+ idx = cnt & (myri10ge_max_intr_slots - 1);
+
+ /* limit potential for livelock by only handling a
+ * limited number of frames. */
+ (*limit)--;
+ }
+ rx_done->idx = idx;
+ rx_done->cnt = cnt;
+ mgp->stats.rx_packets += rx_packets;
+ mgp->stats.rx_bytes += rx_bytes;
+}
+
+static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
+{
+ struct mcp_irq_data *stats = mgp->fw_stats;
+
+ if (unlikely(stats->stats_updated)) {
+ if (mgp->link_state != stats->link_up) {
+ mgp->link_state = stats->link_up;
+ if (mgp->link_state) {
+ printk(KERN_INFO "myri10ge: %s: link up\n",
+ mgp->dev->name);
+ netif_carrier_on(mgp->dev);
+ } else {
+ printk(KERN_INFO "myri10ge: %s: link down\n",
+ mgp->dev->name);
+ netif_carrier_off(mgp->dev);
+ }
+ }
+ if (mgp->rdma_tags_available !=
+ ntohl(mgp->fw_stats->rdma_tags_available)) {
+ mgp->rdma_tags_available =
+ ntohl(mgp->fw_stats->rdma_tags_available);
+ printk(KERN_WARNING "myri10ge: %s: RDMA timed out! "
+ "%d tags left\n", mgp->dev->name,
+ mgp->rdma_tags_available);
+ }
+ mgp->down_cnt += stats->link_down;
+ if (stats->link_down)
+ wake_up(&mgp->down_wq);
+ }
+}
+
+static int myri10ge_poll(struct net_device *netdev, int *budget)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ struct myri10ge_rx_done *rx_done = &mgp->rx_done;
+ int limit, orig_limit, work_done;
+
+ /* process as many rx events as NAPI will allow */
+ limit = min(*budget, netdev->quota);
+ orig_limit = limit;
+ myri10ge_clean_rx_done(mgp, &limit);
+ work_done = orig_limit - limit;
+ *budget -= work_done;
+ netdev->quota -= work_done;
+
+ if (rx_done->entry[rx_done->idx].length == 0 || !netif_running(netdev)) {
+ netif_rx_complete(netdev);
+ __raw_writel(htonl(3), mgp->irq_claim);
+ return 0;
+ }
+ return 1;
+}
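[Editor's note: myri10ge_poll() above follows the original two-counter NAPI contract. A hedged, simplified restatement in kernel-style C; drain() is a hypothetical callback, and the real driver decides completion from the receive ring contents rather than from the work count as this sketch does.]

/* Hedged sketch of the budget/quota bookkeeping used above. */
static int napi_poll_sketch(struct net_device *netdev, int *budget,
                            int (*drain)(int limit))
{
        int limit = min(*budget, netdev->quota);
        int done = drain(limit);                /* events actually processed */

        *budget -= done;
        netdev->quota -= done;

        if (done < limit) {                     /* work exhausted: stop polling */
                netif_rx_complete(netdev);      /* caller re-arms the interrupt */
                return 0;
        }
        return 1;                               /* more work: keep us scheduled */
}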
+
+static irqreturn_t myri10ge_intr(int irq, void *arg, struct pt_regs *regs)
+{
+ struct myri10ge_priv *mgp = arg;
+ struct mcp_irq_data *stats = mgp->fw_stats;
+ struct myri10ge_tx_buf *tx = &mgp->tx;
+ u32 send_done_count;
+ int i;
+
+ /* make sure it is our IRQ, and that the DMA has finished */
+ if (unlikely(!stats->valid))
+ return (IRQ_NONE);
+
+ /* low bit indicates receives are present, so schedule
+ * napi poll handler */
+ if (stats->valid & 1)
+ netif_rx_schedule(mgp->dev);
+
+ if (!mgp->msi_enabled) {
+ __raw_writel(0, mgp->irq_deassert);
+ if (!myri10ge_deassert_wait)
+ stats->valid = 0;
+ mb();
+ } else
+ stats->valid = 0;
+
+ /* Wait for IRQ line to go low, if using INTx */
+ i = 0;
+ while (1) {
+ i++;
+ /* check for transmit completes and receives */
+ send_done_count = ntohl(stats->send_done_count);
+ if (send_done_count != tx->pkt_done)
+ myri10ge_tx_done(mgp, (int)send_done_count);
+ if (unlikely(i > myri10ge_max_irq_loops)) {
+ printk(KERN_WARNING "myri10ge: %s: irq stuck?\n",
+ mgp->dev->name);
+ stats->valid = 0;
+ schedule_work(&mgp->watchdog_work);
+ }
+ if (likely(stats->valid == 0))
+ break;
+ cpu_relax();
+ barrier();
+ }
+
+ myri10ge_check_statblock(mgp);
+
+ __raw_writel(htonl(3), mgp->irq_claim + 1);
+ return (IRQ_HANDLED);
+}
+
+static int
+myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->speed = SPEED_10000;
+ cmd->duplex = DUPLEX_FULL;
+ return 0;
+}
+
+static void
+myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+ strlcpy(info->driver, "myri10ge", sizeof(info->driver));
+ strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
+ strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
+}
+
+static int
+myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ coal->rx_coalesce_usecs = mgp->intr_coal_delay;
+ return 0;
+}
+
+static int
+myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+ mgp->intr_coal_delay = coal->rx_coalesce_usecs;
+ __raw_writel(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
+ return 0;
+}
+
+static void
+myri10ge_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+ pause->autoneg = 0;
+ pause->rx_pause = mgp->pause;
+ pause->tx_pause = mgp->pause;
+}
+
+static int
+myri10ge_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+ if (pause->tx_pause != mgp->pause)
+ return myri10ge_change_pause(mgp, pause->tx_pause);
+ if (pause->rx_pause != mgp->pause)
+ return myri10ge_change_pause(mgp, pause->tx_pause);
+ if (pause->autoneg != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static void
+myri10ge_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+ ring->rx_mini_max_pending = mgp->rx_small.mask + 1;
+ ring->rx_max_pending = mgp->rx_big.mask + 1;
+ ring->rx_jumbo_max_pending = 0;
+ ring->tx_max_pending = mgp->rx_small.mask + 1;
+ ring->rx_mini_pending = ring->rx_mini_max_pending;
+ ring->rx_pending = ring->rx_max_pending;
+ ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
+ ring->tx_pending = ring->tx_max_pending;
+}
+
+static u32 myri10ge_get_rx_csum(struct net_device *netdev)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ if (mgp->csum_flag)
+ return 1;
+ else
+ return 0;
+}
+
+static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ if (csum_enabled)
+ mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
+ else
+ mgp->csum_flag = 0;
+ return 0;
+}
+
+static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
+ "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
+ "rx_length_errors", "rx_over_errors", "rx_crc_errors",
+ "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
+ "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
+ "tx_heartbeat_errors", "tx_window_errors",
+ /* device-specific stats */
+ "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
+ "serial_number", "tx_pkt_start", "tx_pkt_done",
+ "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt",
+ "wake_queue", "stop_queue", "watchdog_resets", "tx_linearized",
+ "link_up", "dropped_link_overflow", "dropped_link_error_or_filtered",
+ "dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
+ "dropped_no_big_buffer"
+};
+
+#define MYRI10GE_NET_STATS_LEN 21
+#define MYRI10GE_STATS_LEN sizeof(myri10ge_gstrings_stats) / ETH_GSTRING_LEN
+
+static void
+myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *myri10ge_gstrings_stats,
+ sizeof(myri10ge_gstrings_stats));
+ break;
+ }
+}
+
+static int myri10ge_get_stats_count(struct net_device *netdev)
+{
+ return MYRI10GE_STATS_LEN;
+}
+
+static void
+myri10ge_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 * data)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
+ data[i] = ((unsigned long *)&mgp->stats)[i];
+
+ data[i++] = (unsigned int)mgp->read_dma;
+ data[i++] = (unsigned int)mgp->write_dma;
+ data[i++] = (unsigned int)mgp->read_write_dma;
+ data[i++] = (unsigned int)mgp->serial_number;
+ data[i++] = (unsigned int)mgp->tx.pkt_start;
+ data[i++] = (unsigned int)mgp->tx.pkt_done;
+ data[i++] = (unsigned int)mgp->tx.req;
+ data[i++] = (unsigned int)mgp->tx.done;
+ data[i++] = (unsigned int)mgp->rx_small.cnt;
+ data[i++] = (unsigned int)mgp->rx_big.cnt;
+ data[i++] = (unsigned int)mgp->wake_queue;
+ data[i++] = (unsigned int)mgp->stop_queue;
+ data[i++] = (unsigned int)mgp->watchdog_resets;
+ data[i++] = (unsigned int)mgp->tx_linearized;
+ data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up);
+ data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow);
+ data[i++] =
+ (unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered);
+ data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt);
+ data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun);
+ data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer);
+ data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer);
+}
+
+static struct ethtool_ops myri10ge_ethtool_ops = {
+ .get_settings = myri10ge_get_settings,
+ .get_drvinfo = myri10ge_get_drvinfo,
+ .get_coalesce = myri10ge_get_coalesce,
+ .set_coalesce = myri10ge_set_coalesce,
+ .get_pauseparam = myri10ge_get_pauseparam,
+ .set_pauseparam = myri10ge_set_pauseparam,
+ .get_ringparam = myri10ge_get_ringparam,
+ .get_rx_csum = myri10ge_get_rx_csum,
+ .set_rx_csum = myri10ge_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_hw_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ethtool_op_set_tso,
+#endif
+ .get_strings = myri10ge_get_strings,
+ .get_stats_count = myri10ge_get_stats_count,
+ .get_ethtool_stats = myri10ge_get_ethtool_stats
+};
+
+static int myri10ge_allocate_rings(struct net_device *dev)
+{
+ struct myri10ge_priv *mgp;
+ struct myri10ge_cmd cmd;
+ int tx_ring_size, rx_ring_size;
+ int tx_ring_entries, rx_ring_entries;
+ int i, status;
+ size_t bytes;
+
+ mgp = netdev_priv(dev);
+
+ /* get ring sizes */
+
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
+ tx_ring_size = cmd.data0;
+ status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
+ rx_ring_size = cmd.data0;
+
+ tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
+ rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
+ mgp->tx.mask = tx_ring_entries - 1;
+ mgp->rx_small.mask = mgp->rx_big.mask = rx_ring_entries - 1;
+
+ /* allocate the host shadow rings */
+
+ bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
+ * sizeof(*mgp->tx.req_list);
+ mgp->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
+ if (mgp->tx.req_bytes == NULL)
+ goto abort_with_nothing;
+
+ /* ensure req_list entries are aligned to 8 bytes */
+ mgp->tx.req_list = (struct mcp_kreq_ether_send *)
+ ALIGN((unsigned long)mgp->tx.req_bytes, 8);
+
+ bytes = rx_ring_entries * sizeof(*mgp->rx_small.shadow);
+ mgp->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
+ if (mgp->rx_small.shadow == NULL)
+ goto abort_with_tx_req_bytes;
+
+ bytes = rx_ring_entries * sizeof(*mgp->rx_big.shadow);
+ mgp->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
+ if (mgp->rx_big.shadow == NULL)
+ goto abort_with_rx_small_shadow;
+
+ /* allocate the host info rings */
+
+ bytes = tx_ring_entries * sizeof(*mgp->tx.info);
+ mgp->tx.info = kzalloc(bytes, GFP_KERNEL);
+ if (mgp->tx.info == NULL)
+ goto abort_with_rx_big_shadow;
+
+ bytes = rx_ring_entries * sizeof(*mgp->rx_small.info);
+ mgp->rx_small.info = kzalloc(bytes, GFP_KERNEL);
+ if (mgp->rx_small.info == NULL)
+ goto abort_with_tx_info;
+
+ bytes = rx_ring_entries * sizeof(*mgp->rx_big.info);
+ mgp->rx_big.info = kzalloc(bytes, GFP_KERNEL);
+ if (mgp->rx_big.info == NULL)
+ goto abort_with_rx_small_info;
+
+ /* Fill the receive rings */
+
+ for (i = 0; i <= mgp->rx_small.mask; i++) {
+ status = myri10ge_getbuf(&mgp->rx_small, mgp->pdev,
+ mgp->small_bytes, i);
+ if (status) {
+ printk(KERN_ERR
+ "myri10ge: %s: alloced only %d small bufs\n",
+ dev->name, i);
+ goto abort_with_rx_small_ring;
+ }
+ }
+
+ for (i = 0; i <= mgp->rx_big.mask; i++) {
+ status =
+ myri10ge_getbuf(&mgp->rx_big, mgp->pdev,
+ dev->mtu + ETH_HLEN, i);
+ if (status) {
+ printk(KERN_ERR
+ "myri10ge: %s: alloced only %d big bufs\n",
+ dev->name, i);
+ goto abort_with_rx_big_ring;
+ }
+ }
+
+ return 0;
+
+abort_with_rx_big_ring:
+ for (i = 0; i <= mgp->rx_big.mask; i++) {
+ if (mgp->rx_big.info[i].skb != NULL)
+ dev_kfree_skb_any(mgp->rx_big.info[i].skb);
+ if (pci_unmap_len(&mgp->rx_big.info[i], len))
+ pci_unmap_single(mgp->pdev,
+ pci_unmap_addr(&mgp->rx_big.info[i],
+ bus),
+ pci_unmap_len(&mgp->rx_big.info[i],
+ len),
+ PCI_DMA_FROMDEVICE);
+ }
+
+abort_with_rx_small_ring:
+ for (i = 0; i <= mgp->rx_small.mask; i++) {
+ if (mgp->rx_small.info[i].skb != NULL)
+ dev_kfree_skb_any(mgp->rx_small.info[i].skb);
+ if (pci_unmap_len(&mgp->rx_small.info[i], len))
+ pci_unmap_single(mgp->pdev,
+ pci_unmap_addr(&mgp->rx_small.info[i],
+ bus),
+ pci_unmap_len(&mgp->rx_small.info[i],
+ len),
+ PCI_DMA_FROMDEVICE);
+ }
+ kfree(mgp->rx_big.info);
+
+abort_with_rx_small_info:
+ kfree(mgp->rx_small.info);
+
+abort_with_tx_info:
+ kfree(mgp->tx.info);
+
+abort_with_rx_big_shadow:
+ kfree(mgp->rx_big.shadow);
+
+abort_with_rx_small_shadow:
+ kfree(mgp->rx_small.shadow);
+
+abort_with_tx_req_bytes:
+ kfree(mgp->tx.req_bytes);
+ mgp->tx.req_bytes = NULL;
+ mgp->tx.req_list = NULL;
+
+abort_with_nothing:
+ return status;
+}
+
+static void myri10ge_free_rings(struct net_device *dev)
+{
+ struct myri10ge_priv *mgp;
+ struct sk_buff *skb;
+ struct myri10ge_tx_buf *tx;
+ int i, len, idx;
+
+ mgp = netdev_priv(dev);
+
+ for (i = 0; i <= mgp->rx_big.mask; i++) {
+ if (mgp->rx_big.info[i].skb != NULL)
+ dev_kfree_skb_any(mgp->rx_big.info[i].skb);
+ if (pci_unmap_len(&mgp->rx_big.info[i], len))
+ pci_unmap_single(mgp->pdev,
+ pci_unmap_addr(&mgp->rx_big.info[i],
+ bus),
+ pci_unmap_len(&mgp->rx_big.info[i],
+ len),
+ PCI_DMA_FROMDEVICE);
+ }
+
+ for (i = 0; i <= mgp->rx_small.mask; i++) {
+ if (mgp->rx_small.info[i].skb != NULL)
+ dev_kfree_skb_any(mgp->rx_small.info[i].skb);
+ if (pci_unmap_len(&mgp->rx_small.info[i], len))
+ pci_unmap_single(mgp->pdev,
+ pci_unmap_addr(&mgp->rx_small.info[i],
+ bus),
+ pci_unmap_len(&mgp->rx_small.info[i],
+ len),
+ PCI_DMA_FROMDEVICE);
+ }
+
+ tx = &mgp->tx;
+ while (tx->done != tx->req) {
+ idx = tx->done & tx->mask;
+ skb = tx->info[idx].skb;
+
+ /* Mark as free */
+ tx->info[idx].skb = NULL;
+ tx->done++;
+ len = pci_unmap_len(&tx->info[idx], len);
+ pci_unmap_len_set(&tx->info[idx], len, 0);
+ if (skb) {
+ mgp->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ if (len)
+ pci_unmap_single(mgp->pdev,
+ pci_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ } else {
+ if (len)
+ pci_unmap_page(mgp->pdev,
+ pci_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ }
+ }
+ kfree(mgp->rx_big.info);
+
+ kfree(mgp->rx_small.info);
+
+ kfree(mgp->tx.info);
+
+ kfree(mgp->rx_big.shadow);
+
+ kfree(mgp->rx_small.shadow);
+
+ kfree(mgp->tx.req_bytes);
+ mgp->tx.req_bytes = NULL;
+ mgp->tx.req_list = NULL;
+}
+
+static int myri10ge_open(struct net_device *dev)
+{
+ struct myri10ge_priv *mgp;
+ struct myri10ge_cmd cmd;
+ int status, big_pow2;
+
+ mgp = netdev_priv(dev);
+
+ if (mgp->running != MYRI10GE_ETH_STOPPED)
+ return -EBUSY;
+
+ mgp->running = MYRI10GE_ETH_STARTING;
+ status = myri10ge_reset(mgp);
+ if (status != 0) {
+ printk(KERN_ERR "myri10ge: %s: failed reset\n", dev->name);
+ mgp->running = MYRI10GE_ETH_STOPPED;
+ return -ENXIO;
+ }
+
+ /* decide what small buffer size to use. For good TCP rx
+ * performance, it is important to not receive 1514 byte
+ * frames into jumbo buffers, as it confuses the socket buffer
+ * accounting code, leading to drops and erratic performance.
+ */
+
+ if (dev->mtu <= ETH_DATA_LEN)
+ mgp->small_bytes = 128; /* enough for a TCP header */
+ else
+ mgp->small_bytes = ETH_FRAME_LEN; /* enough for an ETH_DATA_LEN frame */
+
+ /* Override the small buffer size? */
+ if (myri10ge_small_bytes > 0)
+ mgp->small_bytes = myri10ge_small_bytes;
+
+ /* If the user sets an obscenely small MTU, adjust the small
+ * bytes down to nearly nothing */
+ if (mgp->small_bytes >= (dev->mtu + ETH_HLEN))
+ mgp->small_bytes = 64;
+
+ /* get the lanai pointers to the send and receive rings */
+
+ status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0);
+ mgp->tx.lanai =
+ (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0);
+
+ status |=
+ myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0);
+ mgp->rx_small.lanai =
+ (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
+
+ status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
+ mgp->rx_big.lanai =
+ (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
+
+ if (status != 0) {
+ printk(KERN_ERR
+ "myri10ge: %s: failed to get ring sizes or locations\n",
+ dev->name);
+ mgp->running = MYRI10GE_ETH_STOPPED;
+ return -ENXIO;
+ }
+
+ if (mgp->mtrr >= 0) {
+ mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + 0x200000;
+ mgp->rx_small.wc_fifo = (u8 __iomem *) mgp->sram + 0x300000;
+ mgp->rx_big.wc_fifo = (u8 __iomem *) mgp->sram + 0x340000;
+ } else {
+ mgp->tx.wc_fifo = NULL;
+ mgp->rx_small.wc_fifo = NULL;
+ mgp->rx_big.wc_fifo = NULL;
+ }
+
+ status = myri10ge_allocate_rings(dev);
+ if (status != 0)
+ goto abort_with_nothing;
+
+ /* Firmware needs the big buff size as a power of 2. Lie and
+ * tell it the buffer is larger, because we only use 1
+ * buffer/pkt, and the mtu will prevent overruns.
+ */
+ big_pow2 = dev->mtu + ETH_HLEN + MXGEFW_PAD;
+ while ((big_pow2 & (big_pow2 - 1)) != 0)
+ big_pow2++;
+
+ /* now give firmware buffers sizes, and MTU */
+ cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0);
+ cmd.data0 = mgp->small_bytes;
+ status |=
+ myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0);
+ cmd.data0 = big_pow2;
+ status |=
+ myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
+ if (status) {
+ printk(KERN_ERR "myri10ge: %s: Couldn't set buffer sizes\n",
+ dev->name);
+ goto abort_with_rings;
+ }
+
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus);
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA, &cmd, 0);
+ if (status) {
+ printk(KERN_ERR "myri10ge: %s: Couldn't set stats DMA\n",
+ dev->name);
+ goto abort_with_rings;
+ }
+
+ mgp->link_state = -1;
+ mgp->rdma_tags_available = 15;
+
+ netif_poll_enable(mgp->dev); /* must happen prior to any irq */
+
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
+ if (status) {
+ printk(KERN_ERR "myri10ge: %s: Couldn't bring up link\n",
+ dev->name);
+ goto abort_with_rings;
+ }
+
+ mgp->wake_queue = 0;
+ mgp->stop_queue = 0;
+ mgp->running = MYRI10GE_ETH_RUNNING;
+ mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
+ add_timer(&mgp->watchdog_timer);
+ netif_wake_queue(dev);
+ return 0;
+
+abort_with_rings:
+ myri10ge_free_rings(dev);
+
+abort_with_nothing:
+ mgp->running = MYRI10GE_ETH_STOPPED;
+ return -ENOMEM;
+}
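[Editor's note: a hedged illustration of the big_pow2 rounding in myri10ge_open() above: increment until only one bit is set, i.e. (x & (x - 1)) == 0. The sample value assumes the default 9000-byte MTU, ETH_HLEN of 14, and MXGEFW_PAD of 2 (the pad size is an assumption here).]

#include <stdio.h>

static unsigned int next_pow2_linear(unsigned int x)
{
        while ((x & (x - 1)) != 0)
                x++;
        return x;
}

int main(void)
{
        unsigned int big = 9000 + 14 + 2;       /* mtu + ETH_HLEN + assumed MXGEFW_PAD */

        printf("%u -> %u\n", big, next_pow2_linear(big));      /* 9016 -> 16384 */
        return 0;
}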
+
+static int myri10ge_close(struct net_device *dev)
+{
+ struct myri10ge_priv *mgp;
+ struct myri10ge_cmd cmd;
+ int status, old_down_cnt;
+
+ mgp = netdev_priv(dev);
+
+ if (mgp->running != MYRI10GE_ETH_RUNNING)
+ return 0;
+
+ if (mgp->tx.req_bytes == NULL)
+ return 0;
+
+ del_timer_sync(&mgp->watchdog_timer);
+ mgp->running = MYRI10GE_ETH_STOPPING;
+ netif_poll_disable(mgp->dev);
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+ old_down_cnt = mgp->down_cnt;
+ mb();
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
+ if (status)
+ printk(KERN_ERR "myri10ge: %s: Couldn't bring down link\n",
+ dev->name);
+
+ wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt, HZ);
+ if (old_down_cnt == mgp->down_cnt)
+ printk(KERN_ERR "myri10ge: %s never got down irq\n", dev->name);
+
+ netif_tx_disable(dev);
+
+ myri10ge_free_rings(dev);
+
+ mgp->running = MYRI10GE_ETH_STOPPED;
+ return 0;
+}
+
+/* copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
+ * backwards one at a time and handle ring wraps */
+
+static inline void
+myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx,
+ struct mcp_kreq_ether_send *src, int cnt)
+{
+ int idx, starting_slot;
+ starting_slot = tx->req;
+ while (cnt > 1) {
+ cnt--;
+ idx = (starting_slot + cnt) & tx->mask;
+ myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src));
+ mb();
+ }
+}
+
+/*
+ * copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
+ * at most 32 bytes at a time, so as to avoid involving the software
+ * pio handler in the nic. We re-write the first segment's flags
+ * to mark them valid only after writing the entire chain.
+ */
+
+static inline void
+myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
+ int cnt)
+{
+ int idx, i;
+ struct mcp_kreq_ether_send __iomem *dstp, *dst;
+ struct mcp_kreq_ether_send *srcp;
+ u8 last_flags;
+
+ idx = tx->req & tx->mask;
+
+ last_flags = src->flags;
+ src->flags = 0;
+ mb();
+ dst = dstp = &tx->lanai[idx];
+ srcp = src;
+
+ if ((idx + cnt) < tx->mask) {
+ for (i = 0; i < (cnt - 1); i += 2) {
+ myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src));
+ mb(); /* force write every 32 bytes */
+ srcp += 2;
+ dstp += 2;
+ }
+ } else {
+ /* submit all but the first request, and ensure
+ * that it is submitted below */
+ myri10ge_submit_req_backwards(tx, src, cnt);
+ i = 0;
+ }
+ if (i < cnt) {
+ /* submit the first request */
+ myri10ge_pio_copy(dstp, srcp, sizeof(*src));
+ mb(); /* barrier before setting valid flag */
+ }
+
+ /* re-write the last 32-bits with the valid flags */
+ src->flags = last_flags;
+ __raw_writel(*((u32 *) src + 3), (u32 __iomem *) dst + 3);
+ tx->req += cnt;
+ mb();
+}
+
+static inline void
+myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx,
+ struct mcp_kreq_ether_send *src, int cnt)
+{
+ tx->req += cnt;
+ mb();
+ while (cnt >= 4) {
+ myri10ge_pio_copy(tx->wc_fifo, src, 64);
+ mb();
+ src += 4;
+ cnt -= 4;
+ }
+ if (cnt > 0) {
+ /* pad it to 64 bytes. The src is 64 bytes bigger than it
+ * needs to be so that we don't overrun it */
+ myri10ge_pio_copy(tx->wc_fifo + (cnt << 18), src, 64);
+ mb();
+ }
+}
+
+/*
+ * Transmit a packet.  We need to split the packet so that a single
+ * segment does not cross myri10ge->tx.boundary, which makes segment
+ * counting tricky.  Rather than trying to count segments up front, we
+ * simply give up if fewer descriptors are currently available than a
+ * reasonably fragmented packet might need.  If we run out of segments
+ * while preparing a packet for DMA, we just linearize it and try again.
+ */
+
+static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct myri10ge_priv *mgp = netdev_priv(dev);
+ struct mcp_kreq_ether_send *req;
+ struct myri10ge_tx_buf *tx = &mgp->tx;
+ struct skb_frag_struct *frag;
+ dma_addr_t bus;
+ u32 low, high_swapped;
+ unsigned int len;
+ int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
+ u16 pseudo_hdr_offset, cksum_offset;
+ int cum_len, seglen, boundary, rdma_count;
+ u8 flags, odd_flag;
+
+again:
+ req = tx->req_list;
+ avail = tx->mask - 1 - (tx->req - tx->done);
+
+ mss = 0;
+ max_segments = MXGEFW_MAX_SEND_DESC;
+
+#ifdef NETIF_F_TSO
+ if (skb->len > (dev->mtu + ETH_HLEN)) {
+ mss = skb_shinfo(skb)->tso_size;
+ if (mss != 0)
+ max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
+ }
+#endif /*NETIF_F_TSO */
+
+ if ((unlikely(avail < max_segments))) {
+ /* we are out of transmit resources */
+ mgp->stop_queue++;
+ netif_stop_queue(dev);
+ return 1;
+ }
+
+ /* Setup checksum offloading, if needed */
+ cksum_offset = 0;
+ pseudo_hdr_offset = 0;
+ odd_flag = 0;
+ flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
+ if (likely(skb->ip_summed == CHECKSUM_HW)) {
+ cksum_offset = (skb->h.raw - skb->data);
+ pseudo_hdr_offset = (skb->h.raw + skb->csum) - skb->data;
+ /* If the headers are excessively large, then we must
+ * fall back to a software checksum */
+ if (unlikely(cksum_offset > 255 || pseudo_hdr_offset > 127)) {
+ if (skb_checksum_help(skb, 0))
+ goto drop;
+ cksum_offset = 0;
+ pseudo_hdr_offset = 0;
+ } else {
+ pseudo_hdr_offset = htons(pseudo_hdr_offset);
+ odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
+ flags |= MXGEFW_FLAGS_CKSUM;
+ }
+ }
+
+ cum_len = 0;
+
+#ifdef NETIF_F_TSO
+ if (mss) { /* TSO */
+ /* this removes any CKSUM flag from before */
+ flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
+
+ /* negative cum_len signifies to the
+ * send loop that we are still in the
+ * header portion of the TSO packet.
+ * TSO header must be at most 134 bytes long */
+ cum_len = -((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
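+ /* e.g. a plain 14-byte Ethernet + 20-byte IPv4 + 20-byte TCP header
+ * gives cum_len = -54 here (illustrative, assuming no VLAN or options) */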
+
+ /* for TSO, pseudo_hdr_offset holds mss.
+ * The firmware figures out where to put
+ * the checksum by parsing the header. */
+ pseudo_hdr_offset = htons(mss);
+ } else
+#endif /*NETIF_F_TSO */
+ /* Mark small packets, and pad out tiny packets */
+ if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
+ flags |= MXGEFW_FLAGS_SMALL;
+
+ /* pad frames to at least ETH_ZLEN bytes */
+ if (unlikely(skb->len < ETH_ZLEN)) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL) {
+ /* The packet is gone, so we must
+ * return 0 */
+ mgp->stats.tx_dropped += 1;
+ return 0;
+ }
+ /* adjust the len to account for the zero pad
+ * so that the nic can know how long it is */
+ skb->len = ETH_ZLEN;
+ }
+ }
+
+ /* map the skb for DMA */
+ len = skb->len - skb->data_len;
+ idx = tx->req & tx->mask;
+ tx->info[idx].skb = skb;
+ bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&tx->info[idx], bus, bus);
+ pci_unmap_len_set(&tx->info[idx], len, len);
+
+ frag_cnt = skb_shinfo(skb)->nr_frags;
+ frag_idx = 0;
+ count = 0;
+ rdma_count = 0;
+
+ /* "rdma_count" is the number of RDMAs belonging to the
+ * current packet BEFORE the current send request. For
+ * non-TSO packets, this is equal to "count".
+ * For TSO packets, rdma_count needs to be reset
+ * to 0 after a segment cut.
+ *
+ * The rdma_count field of the send request is
+ * the number of RDMAs of the packet starting at
+ * that request. For TSO send requests with one or more cuts
+ * in the middle, this is the number of RDMAs starting
+ * after the last cut in the request. All previous
+ * segments before the last cut implicitly have 1 RDMA.
+ *
+ * Since the number of RDMAs is not known beforehand,
+ * it must be filled-in retroactively - after each
+ * segmentation cut or at the end of the entire packet.
+ */
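+ /* Concrete example (illustrative): a non-TSO packet built from three
+ * send requests leaves the loop below with rdma_count == 3, so the
+ * (req - rdma_count)->rdma_count assignment after the loop back-patches
+ * the packet's first request with 3. */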
+
+ while (1) {
+ /* Break the SKB or Fragment up into pieces which
+ * do not cross mgp->tx.boundary */
+ low = MYRI10GE_LOWPART_TO_U32(bus);
+ high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
+ while (len) {
+ u8 flags_next;
+ int cum_len_next;
+
+ if (unlikely(count == max_segments))
+ goto abort_linearize;
+
+ boundary = (low + tx->boundary) & ~(tx->boundary - 1);
+ seglen = boundary - low;
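+ /* e.g. with tx->boundary == 4096 and low == 0x12345 this yields
+ * boundary == 0x13000 and seglen == 0xcbb (illustrative values only) */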
+ if (seglen > len)
+ seglen = len;
+ flags_next = flags & ~MXGEFW_FLAGS_FIRST;
+ cum_len_next = cum_len + seglen;
+#ifdef NETIF_F_TSO
+ if (mss) { /* TSO */
+ (req - rdma_count)->rdma_count = rdma_count + 1;
+
+ if (likely(cum_len >= 0)) { /* payload */
+ int next_is_first, chop;
+
+ chop = (cum_len_next > mss);
+ cum_len_next = cum_len_next % mss;
+ next_is_first = (cum_len_next == 0);
+ flags |= chop * MXGEFW_FLAGS_TSO_CHOP;
+ flags_next |= next_is_first *
+ MXGEFW_FLAGS_FIRST;
+ rdma_count |= -(chop | next_is_first);
+ rdma_count += chop & !next_is_first;
+ } else if (likely(cum_len_next >= 0)) { /* header ends */
+ int small;
+
+ rdma_count = -1;
+ cum_len_next = 0;
+ seglen = -cum_len;
+ small = (mss <= MXGEFW_SEND_SMALL_SIZE);
+ flags_next = MXGEFW_FLAGS_TSO_PLD |
+ MXGEFW_FLAGS_FIRST |
+ (small * MXGEFW_FLAGS_SMALL);
+ }
+ }
+#endif /* NETIF_F_TSO */
+ req->addr_high = high_swapped;
+ req->addr_low = htonl(low);
+ req->pseudo_hdr_offset = pseudo_hdr_offset;
+ req->pad = 0; /* complete solid 16-byte block; does this matter? */
+ req->rdma_count = 1;
+ req->length = htons(seglen);
+ req->cksum_offset = cksum_offset;
+ req->flags = flags | ((cum_len & 1) * odd_flag);
+
+ low += seglen;
+ len -= seglen;
+ cum_len = cum_len_next;
+ flags = flags_next;
+ req++;
+ count++;
+ rdma_count++;
+ if (unlikely(cksum_offset > seglen))
+ cksum_offset -= seglen;
+ else
+ cksum_offset = 0;
+ }
+ if (frag_idx == frag_cnt)
+ break;
+
+ /* map next fragment for DMA */
+ idx = (count + tx->req) & tx->mask;
+ frag = &skb_shinfo(skb)->frags[frag_idx];
+ frag_idx++;
+ len = frag->size;
+ bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset,
+ len, PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&tx->info[idx], bus, bus);
+ pci_unmap_len_set(&tx->info[idx], len, len);
+ }
+
+ (req - rdma_count)->rdma_count = rdma_count;
+#ifdef NETIF_F_TSO
+ if (mss)
+ do {
+ req--;
+ req->flags |= MXGEFW_FLAGS_TSO_LAST;
+ } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
+ MXGEFW_FLAGS_FIRST)));
+#endif
+ idx = ((count - 1) + tx->req) & tx->mask;
+ tx->info[idx].last = 1;
+ if (tx->wc_fifo == NULL)
+ myri10ge_submit_req(tx, tx->req_list, count);
+ else
+ myri10ge_submit_req_wc(tx, tx->req_list, count);
+ tx->pkt_start++;
+ if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
+ mgp->stop_queue++;
+ netif_stop_queue(dev);
+ }
+ dev->trans_start = jiffies;
+ return 0;
+
+abort_linearize:
+ /* Free any DMA resources we've alloced and clear out the skb
+ * slot so as not to trip up assertions, and to avoid a
+ * double-free if linearizing fails */
+
+ last_idx = (idx + 1) & tx->mask;
+ idx = tx->req & tx->mask;
+ tx->info[idx].skb = NULL;
+ do {
+ len = pci_unmap_len(&tx->info[idx], len);
+ if (len) {
+ if (tx->info[idx].skb != NULL)
+ pci_unmap_single(mgp->pdev,
+ pci_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(mgp->pdev,
+ pci_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ pci_unmap_len_set(&tx->info[idx], len, 0);
+ tx->info[idx].skb = NULL;
+ }
+ idx = (idx + 1) & tx->mask;
+ } while (idx != last_idx);
+ if (skb_shinfo(skb)->tso_size) {
+ printk(KERN_ERR
+ "myri10ge: %s: TSO but wanted to linearize?!?!?\n",
+ mgp->dev->name);
+ goto drop;
+ }
+
+ if (skb_linearize(skb, GFP_ATOMIC))
+ goto drop;
+
+ mgp->tx_linearized++;
+ goto again;
+
+drop:
+ dev_kfree_skb_any(skb);
+ mgp->stats.tx_dropped += 1;
+ return 0;
+
+}
+
+static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
+{
+ struct myri10ge_priv *mgp = netdev_priv(dev);
+ return &mgp->stats;
+}
+
+static void myri10ge_set_multicast_list(struct net_device *dev)
+{
+ /* can be called from atomic contexts,
+ * pass 1 to force atomicity in myri10ge_send_cmd() */
+ myri10ge_change_promisc(netdev_priv(dev), dev->flags & IFF_PROMISC, 1);
+}
+
+static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = addr;
+ struct myri10ge_priv *mgp = netdev_priv(dev);
+ int status;
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EADDRNOTAVAIL;
+
+ status = myri10ge_update_mac_address(mgp, sa->sa_data);
+ if (status != 0) {
+ printk(KERN_ERR
+ "myri10ge: %s: changing mac address failed with %d\n",
+ dev->name, status);
+ return status;
+ }
+
+ /* change the dev structure */
+ memcpy(dev->dev_addr, sa->sa_data, 6);
+ return 0;
+}
+
+static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct myri10ge_priv *mgp = netdev_priv(dev);
+ int error = 0;
+
+ if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) {
+ printk(KERN_ERR "myri10ge: %s: new mtu (%d) is not valid\n",
+ dev->name, new_mtu);
+ return -EINVAL;
+ }
+ printk(KERN_INFO "%s: changing mtu from %d to %d\n",
+ dev->name, dev->mtu, new_mtu);
+ if (mgp->running) {
+ /* if we change the mtu on an active device, we must
+ * reset the device so the firmware sees the change */
+ myri10ge_close(dev);
+ dev->mtu = new_mtu;
+ myri10ge_open(dev);
+ } else
+ dev->mtu = new_mtu;
+
+ return error;
+}
+
+/*
+ * Enable ECRC to align PCI-E Completion packets on an 8-byte boundary.
+ * Only do it if the bridge is a root port since we don't want to disturb
+ * any other device, except if forced with myri10ge_ecrc_enable > 1.
+ */
+
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_PCIE 0x005d
+
+static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
+{
+ struct pci_dev *bridge = mgp->pdev->bus->self;
+ struct device *dev = &mgp->pdev->dev;
+ unsigned cap;
+ unsigned err_cap;
+ u16 val;
+ u8 ext_type;
+ int ret;
+
+ if (!myri10ge_ecrc_enable || !bridge)
+ return;
+
+ /* check that the bridge is a root port */
+ cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
+ pci_read_config_word(bridge, cap + PCI_CAP_FLAGS, &val);
+ ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
+ if (ext_type != PCI_EXP_TYPE_ROOT_PORT) {
+ if (myri10ge_ecrc_enable > 1) {
+ struct pci_dev *old_bridge = bridge;
+
+ /* Walk the hierarchy up to the root port
+ * where ECRC has to be enabled */
+ do {
+ bridge = bridge->bus->self;
+ if (!bridge) {
+ dev_err(dev,
+ "Failed to find root port"
+ " to force ECRC\n");
+ return;
+ }
+ cap =
+ pci_find_capability(bridge, PCI_CAP_ID_EXP);
+ pci_read_config_word(bridge,
+ cap + PCI_CAP_FLAGS, &val);
+ ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
+ } while (ext_type != PCI_EXP_TYPE_ROOT_PORT);
+
+ dev_info(dev,
+ "Forcing ECRC on non-root port %s"
+ " (enabling on root port %s)\n",
+ pci_name(old_bridge), pci_name(bridge));
+ } else {
+ dev_err(dev,
+ "Not enabling ECRC on non-root port %s\n",
+ pci_name(bridge));
+ return;
+ }
+ }
+
+ cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
+ /* nvidia ext cap is not always linked in ext cap chain */
+ if (!cap
+ && bridge->vendor == PCI_VENDOR_ID_NVIDIA
+ && bridge->device == PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_PCIE)
+ cap = 0x160;
+
+ if (!cap)
+ return;
+
+ ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap);
+ if (ret) {
+ dev_err(dev, "failed reading ext-conf-space of %s\n",
+ pci_name(bridge));
+ dev_err(dev, "\t pci=nommconf in use? "
+ "or buggy/incomplete/absent ACPI MCFG attr?\n");
+ return;
+ }
+ if (!(err_cap & PCI_ERR_CAP_ECRC_GENC))
+ return;
+
+ err_cap |= PCI_ERR_CAP_ECRC_GENE;
+ pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap);
+ dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge));
+ mgp->tx.boundary = 4096;
+ mgp->fw_name = myri10ge_fw_aligned;
+}
+
+/*
+ * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
+ * when the PCI-E Completion packets are aligned on an 8-byte
+ * boundary. Some PCI-E chip sets always align Completion packets; on
+ * the ones that do not, the alignment can be enforced by enabling
+ * ECRC generation (if supported).
+ *
+ * When PCI-E Completion packets are not aligned, it is actually more
+ * efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
+ *
+ * If the driver can neither enable ECRC nor verify that it has
+ * already been enabled, then it must use a firmware image which works
+ * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it
+ * should also ensure that it never gives the device a Read-DMA which is
+ * larger than 2KB by setting the tx.boundary to 2KB. If ECRC is
+ * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat)
+ * firmware image, and set tx.boundary to 4KB.
+ */
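+
+/*
+ * Informational summary of the selection logic below: if completions are
+ * known (or forced) to be aligned, or ECRC generation was successfully
+ * enabled, the driver uses tx.boundary = 4096 and the aligned firmware;
+ * otherwise it uses tx.boundary = 2048 and the unaligned firmware.
+ */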
+
+#define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132
+
+static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
+{
+ struct pci_dev *bridge = mgp->pdev->bus->self;
+
+ mgp->tx.boundary = 2048;
+ mgp->fw_name = myri10ge_fw_unaligned;
+
+ if (myri10ge_force_firmware == 0) {
+ myri10ge_enable_ecrc(mgp);
+
+ /* Check to see if the upstream bridge is known to
+ * provide aligned completions */
+ if (bridge
+ /* ServerWorks HT2000/HT1000 */
+ && bridge->vendor == PCI_VENDOR_ID_SERVERWORKS
+ && bridge->device ==
+ PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE) {
+ dev_info(&mgp->pdev->dev,
+ "Assuming aligned completions (0x%x:0x%x)\n",
+ bridge->vendor, bridge->device);
+ mgp->tx.boundary = 4096;
+ mgp->fw_name = myri10ge_fw_aligned;
+ }
+ } else {
+ if (myri10ge_force_firmware == 1) {
+ dev_info(&mgp->pdev->dev,
+ "Assuming aligned completions (forced)\n");
+ mgp->tx.boundary = 4096;
+ mgp->fw_name = myri10ge_fw_aligned;
+ } else {
+ dev_info(&mgp->pdev->dev,
+ "Assuming unaligned completions (forced)\n");
+ mgp->tx.boundary = 2048;
+ mgp->fw_name = myri10ge_fw_unaligned;
+ }
+ }
+ if (myri10ge_fw_name != NULL) {
+ dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
+ myri10ge_fw_name);
+ mgp->fw_name = myri10ge_fw_name;
+ }
+}
+
+static void myri10ge_save_state(struct myri10ge_priv *mgp)
+{
+ struct pci_dev *pdev = mgp->pdev;
+ int cap;
+
+ pci_save_state(pdev);
+ /* now save PCIe and MSI state that Linux will not
+ * save for us */
+ cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &mgp->devctl);
+ cap = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+ pci_read_config_word(pdev, cap + PCI_MSI_FLAGS, &mgp->msi_flags);
+}
+
+static void myri10ge_restore_state(struct myri10ge_priv *mgp)
+{
+ struct pci_dev *pdev = mgp->pdev;
+ int cap;
+
+ /* restore PCIe and MSI state that linux will not */
+ cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, mgp->devctl);
+ cap = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+ pci_write_config_word(pdev, cap + PCI_MSI_FLAGS, mgp->msi_flags);
+
+ pci_restore_state(pdev);
+}
+
+#ifdef CONFIG_PM
+
+static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct myri10ge_priv *mgp;
+ struct net_device *netdev;
+
+ mgp = pci_get_drvdata(pdev);
+ if (mgp == NULL)
+ return -EINVAL;
+ netdev = mgp->dev;
+
+ netif_device_detach(netdev);
+ if (netif_running(netdev)) {
+ printk(KERN_INFO "myri10ge: closing %s\n", netdev->name);
+ rtnl_lock();
+ myri10ge_close(netdev);
+ rtnl_unlock();
+ }
+ myri10ge_dummy_rdma(mgp, 0);
+ free_irq(pdev->irq, mgp);
+ myri10ge_save_state(mgp);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int myri10ge_resume(struct pci_dev *pdev)
+{
+ struct myri10ge_priv *mgp;
+ struct net_device *netdev;
+ int status;
+ u16 vendor;
+
+ mgp = pci_get_drvdata(pdev);
+ if (mgp == NULL)
+ return -EINVAL;
+ netdev = mgp->dev;
+ pci_set_power_state(pdev, 0); /* zeros conf space as a side effect */
+ msleep(5); /* give card time to respond */
+ pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
+ if (vendor == 0xffff) {
+ printk(KERN_ERR "myri10ge: %s: device disappeared!\n",
+ mgp->dev->name);
+ return -EIO;
+ }
+ myri10ge_restore_state(mgp);
+ pci_enable_device(pdev);
+ pci_set_master(pdev);
+
+ status = request_irq(pdev->irq, myri10ge_intr, SA_SHIRQ,
+ netdev->name, mgp);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed to allocate IRQ\n");
+ goto abort_with_msi;
+ }
+
+ myri10ge_reset(mgp);
+ myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096);
+
+ /* Save configuration space to be restored if the
+ * nic resets due to a parity error */
+ myri10ge_save_state(mgp);
+
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ myri10ge_open(netdev);
+ rtnl_unlock();
+ }
+ netif_device_attach(netdev);
+
+ return 0;
+
+abort_with_msi:
+ return -EIO;
+
+}
+
+#endif /* CONFIG_PM */
+
+static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
+{
+ struct pci_dev *pdev = mgp->pdev;
+ int vs = mgp->vendor_specific_offset;
+ u32 reboot;
+
+ /* enter read32 mode */
+ pci_write_config_byte(pdev, vs + 0x10, 0x3);
+
+ /* read REBOOT_STATUS (0xfffffff0) */
+ pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0);
+ pci_read_config_dword(pdev, vs + 0x14, &reboot);
+ return reboot;
+}
+
+/*
+ * This watchdog is used to check whether the board has suffered
+ * from a parity error and needs to be recovered.
+ */
+static void myri10ge_watchdog(void *arg)
+{
+ struct myri10ge_priv *mgp = arg;
+ u32 reboot;
+ int status;
+ u16 cmd, vendor;
+
+ mgp->watchdog_resets++;
+ pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
+ if ((cmd & PCI_COMMAND_MASTER) == 0) {
+ /* Bus master DMA disabled? Check to see
+ * if the card rebooted due to a parity error.
+ * For now, just report it */
+ reboot = myri10ge_read_reboot(mgp);
+ printk(KERN_ERR
+ "myri10ge: %s: NIC rebooted (0x%x), resetting\n",
+ mgp->dev->name, reboot);
+ /*
+ * A rebooted nic will come back with config space as
+ * it was after power was applied to PCIe bus.
+ * Attempt to restore config space which was saved
+ * when the driver was loaded, or the last time the
+ * nic was resumed from power saving mode.
+ */
+ myri10ge_restore_state(mgp);
+ } else {
+ /* if we get back -1's from our slot, perhaps somebody
+ * powered off our card. Don't try to reset it in
+ * this case */
+ if (cmd == 0xffff) {
+ pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
+ if (vendor == 0xffff) {
+ printk(KERN_ERR
+ "myri10ge: %s: device disappeared!\n",
+ mgp->dev->name);
+ return;
+ }
+ }
+ /* Perhaps it is a software error. Try to reset */
+
+ printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n",
+ mgp->dev->name);
+ printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n",
+ mgp->dev->name, mgp->tx.req, mgp->tx.done,
+ mgp->tx.pkt_start, mgp->tx.pkt_done,
+ (int)ntohl(mgp->fw_stats->send_done_count));
+ msleep(2000);
+ printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n",
+ mgp->dev->name, mgp->tx.req, mgp->tx.done,
+ mgp->tx.pkt_start, mgp->tx.pkt_done,
+ (int)ntohl(mgp->fw_stats->send_done_count));
+ }
+ rtnl_lock();
+ myri10ge_close(mgp->dev);
+ status = myri10ge_load_firmware(mgp);
+ if (status != 0)
+ printk(KERN_ERR "myri10ge: %s: failed to load firmware\n",
+ mgp->dev->name);
+ else
+ myri10ge_open(mgp->dev);
+ rtnl_unlock();
+}
+
+/*
+ * We use our own timer routine rather than relying upon
+ * netdev->tx_timeout because we have a very large hardware transmit
+ * queue. Due to the large queue, the netdev->tx_timeout function
+ * cannot detect a NIC with a parity error in a timely fashion if the
+ * NIC is lightly loaded.
+ */
+static void myri10ge_watchdog_timer(unsigned long arg)
+{
+ struct myri10ge_priv *mgp;
+
+ mgp = (struct myri10ge_priv *)arg;
+ if (mgp->tx.req != mgp->tx.done &&
+ mgp->tx.done == mgp->watchdog_tx_done)
+ /* nic seems like it might be stuck.. */
+ schedule_work(&mgp->watchdog_work);
+ else
+ /* rearm timer */
+ mod_timer(&mgp->watchdog_timer,
+ jiffies + myri10ge_watchdog_timeout * HZ);
+
+ mgp->watchdog_tx_done = mgp->tx.done;
+}
+
+static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct myri10ge_priv *mgp;
+ struct device *dev = &pdev->dev;
+ size_t bytes;
+ int i;
+ int status = -ENXIO;
+ int cap;
+ int dac_enabled;
+ u16 val;
+
+ netdev = alloc_etherdev(sizeof(*mgp));
+ if (netdev == NULL) {
+ dev_err(dev, "Could not allocate ethernet device\n");
+ return -ENOMEM;
+ }
+
+ mgp = netdev_priv(netdev);
+ memset(mgp, 0, sizeof(*mgp));
+ mgp->dev = netdev;
+ mgp->pdev = pdev;
+ mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
+ mgp->pause = myri10ge_flow_control;
+ mgp->intr_coal_delay = myri10ge_intr_coal_delay;
+ init_waitqueue_head(&mgp->down_wq);
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "pci_enable_device call failed\n");
+ status = -ENODEV;
+ goto abort_with_netdev;
+ }
+ myri10ge_select_firmware(mgp);
+
+ /* Find the vendor-specific cap so we can check
+ * the reboot register later on */
+ mgp->vendor_specific_offset
+ = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+
+ /* Set our max read request to 4KB */
+ cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (cap < 64) {
+ dev_err(&pdev->dev, "Bad PCI_CAP_ID_EXP location %d\n", cap);
+ goto abort_with_netdev;
+ }
+ status = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &val);
+ if (status != 0) {
+ dev_err(&pdev->dev, "Error %d reading PCI_EXP_DEVCTL\n",
+ status);
+ goto abort_with_netdev;
+ }
+ val = (val & ~PCI_EXP_DEVCTL_READRQ) | (5 << 12);
+ status = pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, val);
+ if (status != 0) {
+ dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
+ status);
+ goto abort_with_netdev;
+ }
+
+ pci_set_master(pdev);
+ dac_enabled = 1;
+ status = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+ if (status != 0) {
+ dac_enabled = 0;
+ dev_err(&pdev->dev,
+ "64-bit pci address mask was refused, trying 32-bit");
+ status = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ }
+ if (status != 0) {
+ dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
+ goto abort_with_netdev;
+ }
+ mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
+ &mgp->cmd_bus, GFP_KERNEL);
+ if (mgp->cmd == NULL)
+ goto abort_with_netdev;
+
+ mgp->fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->fw_stats),
+ &mgp->fw_stats_bus, GFP_KERNEL);
+ if (mgp->fw_stats == NULL)
+ goto abort_with_cmd;
+
+ mgp->board_span = pci_resource_len(pdev, 0);
+ mgp->iomem_base = pci_resource_start(pdev, 0);
+ mgp->mtrr = -1;
+#ifdef CONFIG_MTRR
+ mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span,
+ MTRR_TYPE_WRCOMB, 1);
+#endif
+ /* Hack: need to get rid of these magic numbers */
+ mgp->sram_size =
+ 2 * 1024 * 1024 - (2 * (48 * 1024) + (32 * 1024)) - 0x100;
+ if (mgp->sram_size > mgp->board_span) {
+ dev_err(&pdev->dev, "board span %ld bytes too small\n",
+ mgp->board_span);
+ goto abort_with_wc;
+ }
+ mgp->sram = ioremap(mgp->iomem_base, mgp->board_span);
+ if (mgp->sram == NULL) {
+ dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
+ mgp->board_span, mgp->iomem_base);
+ status = -ENXIO;
+ goto abort_with_wc;
+ }
+ memcpy_fromio(mgp->eeprom_strings,
+ mgp->sram + mgp->sram_size - MYRI10GE_EEPROM_STRINGS_SIZE,
+ MYRI10GE_EEPROM_STRINGS_SIZE);
+ memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);
+ status = myri10ge_read_mac_addr(mgp);
+ if (status)
+ goto abort_with_ioremap;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ netdev->dev_addr[i] = mgp->mac_addr[i];
+
+ /* allocate rx done ring */
+ bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
+ mgp->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
+ &mgp->rx_done.bus, GFP_KERNEL);
+ if (mgp->rx_done.entry == NULL)
+ goto abort_with_ioremap;
+ memset(mgp->rx_done.entry, 0, bytes);
+
+ status = myri10ge_load_firmware(mgp);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed to load firmware\n");
+ goto abort_with_rx_done;
+ }
+
+ status = myri10ge_reset(mgp);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed reset\n");
+ goto abort_with_firmware;
+ }
+
+ if (myri10ge_msi) {
+ status = pci_enable_msi(pdev);
+ if (status != 0)
+ dev_err(&pdev->dev,
+ "Error %d setting up MSI; falling back to xPIC\n",
+ status);
+ else
+ mgp->msi_enabled = 1;
+ }
+
+ status = request_irq(pdev->irq, myri10ge_intr, SA_SHIRQ,
+ netdev->name, mgp);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed to allocate IRQ\n");
+ goto abort_with_firmware;
+ }
+
+ pci_set_drvdata(pdev, mgp);
+ if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU)
+ myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
+ if ((myri10ge_initial_mtu + ETH_HLEN) < 68)
+ myri10ge_initial_mtu = 68;
+ netdev->mtu = myri10ge_initial_mtu;
+ netdev->open = myri10ge_open;
+ netdev->stop = myri10ge_close;
+ netdev->hard_start_xmit = myri10ge_xmit;
+ netdev->get_stats = myri10ge_get_stats;
+ netdev->base_addr = mgp->iomem_base;
+ netdev->irq = pdev->irq;
+ netdev->change_mtu = myri10ge_change_mtu;
+ netdev->set_multicast_list = myri10ge_set_multicast_list;
+ netdev->set_mac_address = myri10ge_set_mac_address;
+ netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
+ if (dac_enabled)
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->poll = myri10ge_poll;
+ netdev->weight = myri10ge_napi_weight;
+
+ /* Save configuration space to be restored if the
+ * nic resets due to a parity error */
+ myri10ge_save_state(mgp);
+ /* Restore state immediately since pci_save_msi_state disables MSI */
+ myri10ge_restore_state(mgp);
+
+ /* Setup the watchdog timer */
+ setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
+ (unsigned long)mgp);
+
+ SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
+ INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp);
+ status = register_netdev(netdev);
+ if (status != 0) {
+ dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
+ goto abort_with_irq;
+ }
+
+ printk(KERN_INFO "myri10ge: %s: %s IRQ %d, tx bndry %d, fw %s, WC %s\n",
+ netdev->name, (mgp->msi_enabled ? "MSI" : "xPIC"),
+ pdev->irq, mgp->tx.boundary, mgp->fw_name,
+ (mgp->mtrr >= 0 ? "Enabled" : "Disabled"));
+
+ return 0;
+
+abort_with_irq:
+ free_irq(pdev->irq, mgp);
+ if (mgp->msi_enabled)
+ pci_disable_msi(pdev);
+
+abort_with_firmware:
+ myri10ge_dummy_rdma(mgp, 0);
+
+abort_with_rx_done:
+ bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
+ dma_free_coherent(&pdev->dev, bytes,
+ mgp->rx_done.entry, mgp->rx_done.bus);
+
+abort_with_ioremap:
+ iounmap(mgp->sram);
+
+abort_with_wc:
+#ifdef CONFIG_MTRR
+ if (mgp->mtrr >= 0)
+ mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
+#endif
+ dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats),
+ mgp->fw_stats, mgp->fw_stats_bus);
+
+abort_with_cmd:
+ dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
+ mgp->cmd, mgp->cmd_bus);
+
+abort_with_netdev:
+
+ free_netdev(netdev);
+ return status;
+}
+
+/*
+ * myri10ge_remove
+ *
+ * Does what is necessary to shutdown one Myrinet device. Called
+ * once for each Myrinet card by the kernel when a module is
+ * unloaded.
+ */
+static void myri10ge_remove(struct pci_dev *pdev)
+{
+ struct myri10ge_priv *mgp;
+ struct net_device *netdev;
+ size_t bytes;
+
+ mgp = pci_get_drvdata(pdev);
+ if (mgp == NULL)
+ return;
+
+ flush_scheduled_work();
+ netdev = mgp->dev;
+ unregister_netdev(netdev);
+ free_irq(pdev->irq, mgp);
+ if (mgp->msi_enabled)
+ pci_disable_msi(pdev);
+
+ myri10ge_dummy_rdma(mgp, 0);
+
+ bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
+ dma_free_coherent(&pdev->dev, bytes,
+ mgp->rx_done.entry, mgp->rx_done.bus);
+
+ iounmap(mgp->sram);
+
+#ifdef CONFIG_MTRR
+ if (mgp->mtrr >= 0)
+ mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
+#endif
+ dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats),
+ mgp->fw_stats, mgp->fw_stats_bus);
+
+ dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
+ mgp->cmd, mgp->cmd_bus);
+
+ free_netdev(netdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
+
+static struct pci_device_id myri10ge_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
+ {0},
+};
+
+static struct pci_driver myri10ge_driver = {
+ .name = "myri10ge",
+ .probe = myri10ge_probe,
+ .remove = myri10ge_remove,
+ .id_table = myri10ge_pci_tbl,
+#ifdef CONFIG_PM
+ .suspend = myri10ge_suspend,
+ .resume = myri10ge_resume,
+#endif
+};
+
+static __init int myri10ge_init_module(void)
+{
+ printk(KERN_INFO "%s: Version %s\n", myri10ge_driver.name,
+ MYRI10GE_VERSION_STR);
+ return pci_register_driver(&myri10ge_driver);
+}
+
+module_init(myri10ge_init_module);
+
+static __exit void myri10ge_cleanup_module(void)
+{
+ pci_unregister_driver(&myri10ge_driver);
+}
+
+module_exit(myri10ge_cleanup_module);
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h
new file mode 100644
index 000000000000..0a6cae6cb186
--- /dev/null
+++ b/drivers/net/myri10ge/myri10ge_mcp.h
@@ -0,0 +1,205 @@
+#ifndef __MYRI10GE_MCP_H__
+#define __MYRI10GE_MCP_H__
+
+#define MXGEFW_VERSION_MAJOR 1
+#define MXGEFW_VERSION_MINOR 4
+
+/* 8 Bytes */
+struct mcp_dma_addr {
+ u32 high;
+ u32 low;
+};
+
+/* 4 Bytes */
+struct mcp_slot {
+ u16 checksum;
+ u16 length;
+};
+
+/* 64 Bytes */
+struct mcp_cmd {
+ u32 cmd;
+ u32 data0; /* will be low portion if data > 32 bits */
+ /* 8 */
+ u32 data1; /* will be high portion if data > 32 bits */
+ u32 data2; /* currently unused.. */
+ /* 16 */
+ struct mcp_dma_addr response_addr;
+ /* 24 */
+ u8 pad[40];
+};
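+
+/* Illustrative driver-side use (taken from the command setup in this
+ * patch): a 64-bit DMA address is split across data0/data1, e.g.
+ *	cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus);
+ *	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus);
+ *	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA, &cmd, 0);
+ */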
+
+/* 8 Bytes */
+struct mcp_cmd_response {
+ u32 data;
+ u32 result;
+};
+
+/*
+ * flags used in mcp_kreq_ether_send_t:
+ *
+ * The SMALL flag is only needed in the first segment. It is raised
+ * for packets whose total length is less than or equal to 512 bytes.
+ *
+ * The CKSUM flag must be set in all segments.
+ *
+ * The PADDED flag is set if the packet needs to be padded, and it
+ * must be set for all segments.
+ *
+ * MXGEFW_FLAGS_ALIGN_ODD must be set if the cumulative
+ * length of all previous segments was odd.
+ */
+
+#define MXGEFW_FLAGS_SMALL 0x1
+#define MXGEFW_FLAGS_TSO_HDR 0x1
+#define MXGEFW_FLAGS_FIRST 0x2
+#define MXGEFW_FLAGS_ALIGN_ODD 0x4
+#define MXGEFW_FLAGS_CKSUM 0x8
+#define MXGEFW_FLAGS_TSO_LAST 0x8
+#define MXGEFW_FLAGS_NO_TSO 0x10
+#define MXGEFW_FLAGS_TSO_CHOP 0x10
+#define MXGEFW_FLAGS_TSO_PLD 0x20
+
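+/* Illustrative driver usage (values follow myri10ge_xmit() in this patch,
+ * not the firmware spec): the first and only segment of a small,
+ * checksummed, non-TSO packet carries
+ * MXGEFW_FLAGS_FIRST | MXGEFW_FLAGS_SMALL | MXGEFW_FLAGS_CKSUM |
+ * MXGEFW_FLAGS_NO_TSO, i.e. 0x1b. */
+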
+#define MXGEFW_SEND_SMALL_SIZE 1520
+#define MXGEFW_MAX_MTU 9400
+
+union mcp_pso_or_cumlen {
+ u16 pseudo_hdr_offset;
+ u16 cum_len;
+};
+
+#define MXGEFW_MAX_SEND_DESC 12
+#define MXGEFW_PAD 2
+
+/* 16 Bytes */
+struct mcp_kreq_ether_send {
+ u32 addr_high;
+ u32 addr_low;
+ u16 pseudo_hdr_offset;
+ u16 length;
+ u8 pad;
+ u8 rdma_count;
+ u8 cksum_offset; /* where to start computing cksum */
+ u8 flags; /* as defined above */
+};
+
+/* 8 Bytes */
+struct mcp_kreq_ether_recv {
+ u32 addr_high;
+ u32 addr_low;
+};
+
+/* Commands */
+
+#define MXGEFW_CMD_OFFSET 0xf80000
+
+enum myri10ge_mcp_cmd_type {
+ MXGEFW_CMD_NONE = 0,
+ /* Reset the mcp, it is left in a safe state, waiting
+ * for the driver to set all its parameters */
+ MXGEFW_CMD_RESET,
+
+ /* get the version number of the current firmware
+ * (it may also be available in the eeprom strings) */
+ MXGEFW_GET_MCP_VERSION,
+
+ /* Parameters which must be set by the driver before it can
+ * issue MXGEFW_CMD_ETHERNET_UP. They persist until the next
+ * MXGEFW_CMD_RESET is issued */
+
+ MXGEFW_CMD_SET_INTRQ_DMA,
+ MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */
+ MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */
+
+ /* Parameters which refer to lanai SRAM addresses where the
+ * driver must issue PIO writes for various things */
+
+ MXGEFW_CMD_GET_SEND_OFFSET,
+ MXGEFW_CMD_GET_SMALL_RX_OFFSET,
+ MXGEFW_CMD_GET_BIG_RX_OFFSET,
+ MXGEFW_CMD_GET_IRQ_ACK_OFFSET,
+ MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
+
+ /* Parameters which refer to rings stored on the MCP,
+ * and whose size is controlled by the mcp */
+
+ MXGEFW_CMD_GET_SEND_RING_SIZE, /* in bytes */
+ MXGEFW_CMD_GET_RX_RING_SIZE, /* in bytes */
+
+ /* Parameters which refer to rings stored in the host,
+ * and whose size is controlled by the host. Note that
+ * all must be physically contiguous and must contain
+ * a power of 2 number of entries. */
+
+ MXGEFW_CMD_SET_INTRQ_SIZE, /* in bytes */
+
+ /* command to bring ethernet interface up. Above parameters
+ * (plus mtu & mac address) must have been exchanged prior
+ * to issuing this command */
+ MXGEFW_CMD_ETHERNET_UP,
+
+ /* command to bring ethernet interface down. No further sends
+ * or receives may be processed until an MXGEFW_CMD_ETHERNET_UP
+ * is issued, and all interrupt queues must be flushed prior
+ * to ack'ing this command */
+
+ MXGEFW_CMD_ETHERNET_DOWN,
+
+ /* commands the driver may issue live, without resetting
+ * the nic. Note that increasing the mtu "live" should
+ * only be done if the driver has already supplied buffers
+ * sufficiently large to handle the new mtu. Decreasing
+ * the mtu live is safe */
+
+ MXGEFW_CMD_SET_MTU,
+ MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, /* in microseconds */
+ MXGEFW_CMD_SET_STATS_INTERVAL, /* in microseconds */
+ MXGEFW_CMD_SET_STATS_DMA,
+
+ MXGEFW_ENABLE_PROMISC,
+ MXGEFW_DISABLE_PROMISC,
+ MXGEFW_SET_MAC_ADDRESS,
+
+ MXGEFW_ENABLE_FLOW_CONTROL,
+ MXGEFW_DISABLE_FLOW_CONTROL,
+
+ /* do a DMA test
+ * data0,data1 = DMA address
+ * data2 = RDMA length (MSH), WDMA length (LSH)
+ * command return data = repetitions (MSH), 0.5-ms ticks (LSH)
+ */
+ MXGEFW_DMA_TEST
+};
+
+enum myri10ge_mcp_cmd_status {
+ MXGEFW_CMD_OK = 0,
+ MXGEFW_CMD_UNKNOWN,
+ MXGEFW_CMD_ERROR_RANGE,
+ MXGEFW_CMD_ERROR_BUSY,
+ MXGEFW_CMD_ERROR_EMPTY,
+ MXGEFW_CMD_ERROR_CLOSED,
+ MXGEFW_CMD_ERROR_HASH_ERROR,
+ MXGEFW_CMD_ERROR_BAD_PORT,
+ MXGEFW_CMD_ERROR_RESOURCES
+};
+
+/* 40 Bytes */
+struct mcp_irq_data {
+ u32 send_done_count;
+
+ u32 link_up;
+ u32 dropped_link_overflow;
+ u32 dropped_link_error_or_filtered;
+ u32 dropped_runt;
+ u32 dropped_overrun;
+ u32 dropped_no_small_buffer;
+ u32 dropped_no_big_buffer;
+ u32 rdma_tags_available;
+
+ u8 tx_stopped;
+ u8 link_down;
+ u8 stats_updated;
+ u8 valid;
+};
+
+#endif /* __MYRI10GE_MCP_H__ */
diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
new file mode 100644
index 000000000000..487f7792fd46
--- /dev/null
+++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
@@ -0,0 +1,58 @@
+#ifndef __MYRI10GE_MCP_GEN_HEADER_H__
+#define __MYRI10GE_MCP_GEN_HEADER_H__
+
+/* This file defines a standard header used as a first entry point to
+ * exchange information between the firmware and the driver. The
+ * header structure can be anywhere in the mcp. It will usually be in
+ * the .data section, because some fields need to be initialized at
+ * compile time.
+ * The 32bit word at offset MCP_HEADER_PTR_OFFSET in the mcp must
+ * contain the location of the header.
+ *
+ * Typically an MCP will start with the following:
+ * .text
+ * .space 52 ! to help catch MEMORY_INT errors
+ * bt start ! jump to real code
+ * nop
+ * .long _gen_mcp_header
+ *
+ * The source will have a definition like:
+ *
+ * mcp_gen_header_t gen_mcp_header = {
+ * .header_length = sizeof(mcp_gen_header_t),
+ * .mcp_type = MCP_TYPE_XXX,
+ * .version = "something $Id: mcp_gen_header.h,v 1.2 2006/05/13 10:04:35 bgoglin Exp $",
+ * .mcp_globals = (unsigned)&Globals
+ * };
+ */
+
+#define MCP_HEADER_PTR_OFFSET 0x3c
+
+#define MCP_TYPE_MX 0x4d582020 /* "MX " */
+#define MCP_TYPE_PCIE 0x70636965 /* "PCIE" pcie-only MCP */
+#define MCP_TYPE_ETH 0x45544820 /* "ETH " */
+#define MCP_TYPE_MCP0 0x4d435030 /* "MCP0" */
+
+struct mcp_gen_header {
+ /* the first 4 fields are filled at compile time */
+ unsigned header_length;
+ unsigned mcp_type;
+ char version[128];
+ unsigned mcp_globals; /* pointer to mcp-type specific structure */
+
+ /* filled by the MCP at run-time */
+ unsigned sram_size;
+ unsigned string_specs; /* either the original STRING_SPECS or a superset */
+ unsigned string_specs_len;
+
+ /* Fields above this comment are guaranteed to be present.
+ *
+ * Fields below this comment are extensions added in later versions
+ * of this struct; drivers should compare the header_length against
+ * offsetof(field) to check whether a given MCP implements them.
+ *
+ * Never remove any field. Keep everything naturally aligned.
+ */
+};
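+
+/* Illustrative extension check (`new_field' is hypothetical, shown only to
+ * demonstrate the header_length comparison described above):
+ *
+ *	if (hdr->header_length > offsetof(struct mcp_gen_header, new_field))
+ *		... the MCP provides new_field ...
+ */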
+
+#endif /* __MYRI10GE_MCP_GEN_HEADER_H__ */
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index b32765215f75..963a11fa9fe2 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -829,7 +829,7 @@ that the ne2k probe is the last 8390 based probe to take place (as it
is at boot) and so the probe will get confused by any other 8390 cards.
ISA device autoprobes on a running machine are not recommended anyway. */
-int init_module(void)
+int __init init_module(void)
{
int this_dev, found = 0;
diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c
index 2aa7b77f84f8..eebf5f02b476 100644
--- a/drivers/net/ne2.c
+++ b/drivers/net/ne2.c
@@ -780,7 +780,7 @@ MODULE_PARM_DESC(bad, "(ignored)");
/* Module code fixed by David Weinehall */
-int init_module(void)
+int __init init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 66e74f740261..bf58db29e2ed 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -107,7 +107,7 @@ static int init_netconsole(void)
if(!configured) {
printk("netconsole: not configured, aborting\n");
- return -EINVAL;
+ return 0;
}
if(netpoll_setup(&np))
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 4260c2128f47..a8f6bfc96fd2 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1204,7 +1204,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
dev->last_rx = jiffies;
lp->linux_stats.rx_packets++;
- lp->linux_stats.rx_bytes += skb->len;
+ lp->linux_stats.rx_bytes += pkt_len;
outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
continue;
} else {
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index d090df413049..661bfe54ff5d 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -12,7 +12,7 @@
Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
pcnet_cs.c 1.153 2003/11/09 18:53:09
-
+
The network driver code is based on Donald Becker's NE2000 code:
Written 1992,1993 by Donald Becker.
@@ -146,7 +146,7 @@ typedef struct hw_info_t {
#define MII_PHYID_REG2 0x03
static hw_info_t hw_info[] = {
- { /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT },
+ { /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT },
{ /* Allied Telesis LA-PCM */ 0x0ff0, 0x00, 0x00, 0xf4, 0 },
{ /* APEX MultiCard */ 0x03f4, 0x00, 0x20, 0xe5, 0 },
{ /* ASANTE FriendlyNet */ 0x4910, 0x00, 0x00, 0x94,
@@ -193,7 +193,7 @@ static hw_info_t hw_info[] = {
{ /* NE2000 Compatible */ 0x0ff0, 0x00, 0xa0, 0x0c, 0 },
{ /* Network General Sniffer */ 0x0ff0, 0x00, 0x00, 0x65,
HAS_MISC_REG | HAS_IBM_MISC },
- { /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45,
+ { /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45,
HAS_MISC_REG | HAS_IBM_MISC },
{ /* PreMax PE-200 */ 0x07f0, 0x00, 0x20, 0xe0, 0 },
{ /* RPTI EP400 */ 0x0110, 0x00, 0x40, 0x95, 0 },
@@ -330,7 +330,7 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link)
for (j = 0; j < 6; j++)
dev->dev_addr[j] = readb(base + (j<<1));
}
-
+
iounmap(virt);
j = pcmcia_release_window(link->win);
if (j != CS_SUCCESS)
@@ -490,7 +490,7 @@ static int try_io_port(struct pcmcia_device *link)
if (link->io.NumPorts2 > 0) {
/* for master/slave multifunction cards */
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- link->irq.Attributes =
+ link->irq.Attributes =
IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
}
} else {
@@ -543,19 +543,19 @@ static int pcnet_config(struct pcmcia_device *link)
manfid = le16_to_cpu(buf[0]);
prodid = le16_to_cpu(buf[1]);
}
-
+
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
tuple.Attributes = 0;
CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (last_ret == CS_SUCCESS) {
cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
cistpl_io_t *io = &(parse.cftable_entry.io);
-
+
if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
pcmcia_parse_tuple(link, &tuple, &parse) != 0 ||
cfg->index == 0 || cfg->io.nwin == 0)
goto next_entry;
-
+
link->conf.ConfigIndex = cfg->index;
/* For multifunction cards, by convention, we configure the
network function with window 0, and serial with window 1 */
@@ -584,7 +584,7 @@ static int pcnet_config(struct pcmcia_device *link)
}
CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
-
+
if (link->io.NumPorts2 == 8) {
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
@@ -592,7 +592,7 @@ static int pcnet_config(struct pcmcia_device *link)
if ((manfid == MANFID_IBM) &&
(prodid == PRODID_IBM_HOME_AND_AWAY))
link->conf.ConfigIndex |= 0x10;
-
+
CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
dev->irq = link->irq.AssignedIRQ;
dev->base_addr = link->io.BasePort1;
@@ -614,7 +614,7 @@ static int pcnet_config(struct pcmcia_device *link)
hw_info = get_ax88190(link);
if (hw_info == NULL)
hw_info = get_hwired(link);
-
+
if (hw_info == NULL) {
printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
" address for io base %#3lx\n", dev->base_addr);
@@ -631,7 +631,7 @@ static int pcnet_config(struct pcmcia_device *link)
info->flags &= ~USE_BIG_BUF;
if (!use_big_buf)
info->flags &= ~USE_BIG_BUF;
-
+
if (info->flags & USE_BIG_BUF) {
start_pg = SOCKET_START_PG;
stop_pg = SOCKET_STOP_PG;
@@ -929,7 +929,7 @@ static void set_misc_reg(struct net_device *dev)
kio_addr_t nic_base = dev->base_addr;
pcnet_dev_t *info = PRIV(dev);
u_char tmp;
-
+
if (info->flags & HAS_MISC_REG) {
tmp = inb_p(nic_base + PCNET_MISC) & ~3;
if (dev->if_port == 2)
@@ -1022,7 +1022,7 @@ static int pcnet_close(struct net_device *dev)
ei_close(dev);
free_irq(dev->irq, dev);
-
+
link->open--;
netif_stop_queue(dev);
del_timer_sync(&info->watchdog);
@@ -1054,12 +1054,12 @@ static void pcnet_reset_8390(struct net_device *dev)
udelay(100);
}
outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
-
+
if (i == 100)
printk(KERN_ERR "%s: pcnet_reset_8390() did not complete.\n",
dev->name);
set_misc_reg(dev);
-
+
} /* pcnet_reset_8390 */
/*====================================================================*/
@@ -1233,7 +1233,7 @@ static void dma_get_8390_hdr(struct net_device *dev,
dev->name, ei_status.dmaing, ei_status.irqlock);
return;
}
-
+
ei_status.dmaing |= 0x01;
outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD);
outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
@@ -1458,7 +1458,7 @@ static void shmem_get_8390_hdr(struct net_device *dev,
void __iomem *xfer_start = ei_status.mem + (TX_PAGES<<8)
+ (ring_page << 8)
- (ei_status.rx_start_page << 8);
-
+
copyin(hdr, xfer_start, sizeof(struct e8390_pkt_hdr));
/* Fix for big endian systems */
hdr->count = le16_to_cpu(hdr->count);
@@ -1473,7 +1473,7 @@ static void shmem_block_input(struct net_device *dev, int count,
unsigned long offset = (TX_PAGES<<8) + ring_offset
- (ei_status.rx_start_page << 8);
char *buf = skb->data;
-
+
if (offset + count > ei_status.priv) {
/* We must wrap the input move. */
int semi_count = ei_status.priv - offset;
@@ -1541,7 +1541,7 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
info->base = NULL; link->win = NULL;
goto failed;
}
-
+
ei_status.mem = info->base + offset;
ei_status.priv = req.Size;
dev->mem_start = (u_long)ei_status.mem;
@@ -1768,6 +1768,8 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "tamarack.cis"),
+ PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0",
+ 0xb4be14e3, 0x43ac239b, 0x0877b627),
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, pcnet_ids);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index fa39b944bc46..cda3e53d6917 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -45,5 +45,11 @@ config CICADA_PHY
---help---
Currently supports the cis8204
+config SMSC_PHY
+ tristate "Drivers for SMSC PHYs"
+ depends on PHYLIB
+ ---help---
+ Currently supports the LAN83C185 PHY
+
endmenu
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index e4116a5fbb4c..d9614134cc06 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_DAVICOM_PHY) += davicom.o
obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_LXT_PHY) += lxt.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
+obj-$(CONFIG_SMSC_PHY) += smsc.o
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
new file mode 100644
index 000000000000..25e31fb5cb31
--- /dev/null
+++ b/drivers/net/phy/smsc.c
@@ -0,0 +1,101 @@
+/*
+ * drivers/net/phy/smsc.c
+ *
+ * Driver for SMSC PHYs
+ *
+ * Author: Herbert Valerio Riedel
+ *
+ * Copyright (c) 2006 Herbert Valerio Riedel <hvr@gnu.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
+#define MII_LAN83C185_IM 30 /* Interrupt Mask */
+
+#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
+#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
+#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */
+#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */
+#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */
+#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */
+#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */
+
+#define MII_LAN83C185_ISF_INT_ALL (0x0e)
+
+#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
+ (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4)
+
+
+static int lan83c185_config_intr(struct phy_device *phydev)
+{
+ int rc = phy_write (phydev, MII_LAN83C185_IM,
+ ((PHY_INTERRUPT_ENABLED == phydev->interrupts)
+ ? MII_LAN83C185_ISF_INT_PHYLIB_EVENTS
+ : 0));
+
+ return rc < 0 ? rc : 0;
+}
+
+static int lan83c185_ack_interrupt(struct phy_device *phydev)
+{
+ int rc = phy_read (phydev, MII_LAN83C185_ISF);
+
+ return rc < 0 ? rc : 0;
+}
+
+static int lan83c185_config_init(struct phy_device *phydev)
+{
+ return lan83c185_ack_interrupt (phydev);
+}
+
+
+static struct phy_driver lan83c185_driver = {
+ .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
+ .phy_id_mask = 0xfffffff0,
+ .name = "SMSC LAN83C185",
+
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
+ | SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+
+ /* basic functions */
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .config_init = lan83c185_config_init,
+
+ /* IRQ related */
+ .ack_interrupt = lan83c185_ack_interrupt,
+ .config_intr = lan83c185_config_intr,
+
+ .driver = { .owner = THIS_MODULE, }
+};
+
+static int __init smsc_init(void)
+{
+ return phy_driver_register (&lan83c185_driver);
+}
+
+static void __exit smsc_exit(void)
+{
+ phy_driver_unregister (&lan83c185_driver);
+}
+
+MODULE_DESCRIPTION("SMSC PHY driver");
+MODULE_AUTHOR("Herbert Valerio Riedel");
+MODULE_LICENSE("GPL");
+
+module_init(smsc_init);
+module_exit(smsc_exit);
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 475dc930380f..0d101a18026a 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -861,6 +861,9 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
* give dev_queue_xmit something it can free.
*/
skb2 = skb_clone(skb, GFP_ATOMIC);
+
+ if (skb2 == NULL)
+ goto abort;
}
ph = (struct pppoe_hdr *) skb_push(skb2, sizeof(struct pppoe_hdr));
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 0ad3310290f1..9945cc6b8d90 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -184,6 +184,7 @@ static const struct {
static struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), },
{ PCI_DEVICE(0x16ec, 0x0116), },
{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024, },
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 00179bc3437f..0ef525899566 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -167,6 +167,7 @@ typedef struct _XENA_dev_config {
u8 unused4[0x08];
u64 gpio_int_reg;
+#define GPIO_INT_REG_DP_ERR_INT BIT(0)
#define GPIO_INT_REG_LINK_DOWN BIT(1)
#define GPIO_INT_REG_LINK_UP BIT(2)
u64 gpio_int_mask;
@@ -187,7 +188,7 @@ typedef struct _XENA_dev_config {
/* PIC Control registers */
u64 pic_control;
#define PIC_CNTL_RX_ALARM_MAP_1 BIT(0)
-#define PIC_CNTL_SHARED_SPLITS(n) vBIT(n,11,4)
+#define PIC_CNTL_SHARED_SPLITS(n) vBIT(n,11,5)
u64 swapper_ctrl;
#define SWAPPER_CTRL_PIF_R_FE BIT(0)
@@ -267,6 +268,21 @@ typedef struct _XENA_dev_config {
/* General Configuration */
u64 mdio_control;
+#define MDIO_MMD_INDX_ADDR(val) vBIT(val, 0, 16)
+#define MDIO_MMD_DEV_ADDR(val) vBIT(val, 19, 5)
+#define MDIO_MMD_PMA_DEV_ADDR 0x1
+#define MDIO_MMD_PMD_DEV_ADDR 0x1
+#define MDIO_MMD_WIS_DEV_ADDR 0x2
+#define MDIO_MMD_PCS_DEV_ADDR 0x3
+#define MDIO_MMD_PHYXS_DEV_ADDR 0x4
+#define MDIO_MMS_PRT_ADDR(val) vBIT(val, 27, 5)
+#define MDIO_CTRL_START_TRANS(val) vBIT(val, 56, 4)
+#define MDIO_OP(val) vBIT(val, 60, 2)
+#define MDIO_OP_ADDR_TRANS 0x0
+#define MDIO_OP_WRITE_TRANS 0x1
+#define MDIO_OP_READ_POST_INC_TRANS 0x2
+#define MDIO_OP_READ_TRANS 0x3
+#define MDIO_MDIO_DATA(val) vBIT(val, 32, 16)
u64 dtx_control;
@@ -284,9 +300,13 @@ typedef struct _XENA_dev_config {
u64 gpio_control;
#define GPIO_CTRL_GPIO_0 BIT(8)
u64 misc_control;
+#define EXT_REQ_EN BIT(1)
#define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3)
- u8 unused7_1[0x240 - 0x208];
+ u8 unused7_1[0x230 - 0x208];
+
+ u64 pic_control2;
+ u64 ini_dperr_ctrl;
u64 wreq_split_mask;
#define WREQ_SPLIT_MASK_SET_MASK(val) vBIT(val, 52, 12)
@@ -493,6 +513,7 @@ typedef struct _XENA_dev_config {
#define PRC_CTRL_NO_SNOOP_DESC BIT(22)
#define PRC_CTRL_NO_SNOOP_BUFF BIT(23)
#define PRC_CTRL_BIMODAL_INTERRUPT BIT(37)
+#define PRC_CTRL_GROUP_READS BIT(38)
#define PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24)
u64 prc_alarm_action;
@@ -541,7 +562,12 @@ typedef struct _XENA_dev_config {
#define RX_PA_CFG_IGNORE_LLC_CTRL BIT(3)
#define RX_PA_CFG_IGNORE_L2_ERR BIT(6)
- u8 unused12[0x700 - 0x1D8];
+ u64 unused_11_1;
+
+ u64 ring_bump_counter1;
+ u64 ring_bump_counter2;
+
+ u8 unused12[0x700 - 0x1F0];
u64 rxdma_debug_ctrl;
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 79208f434ac1..cac9fdd2e1d5 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -26,15 +26,22 @@
*
* The module loadable parameters that are supported by the driver and a brief
* explaination of all the variables.
+ *
* rx_ring_num : This can be used to program the number of receive rings used
* in the driver.
- * rx_ring_sz: This defines the number of descriptors each ring can have. This
- * is also an array of size 8.
+ * rx_ring_sz: This defines the number of receive blocks each ring can have.
+ * This is also an array of size 8.
* rx_ring_mode: This defines the operation mode of all 8 rings. The valid
* values are 1, 2 and 3.
* tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
* tx_fifo_len: This too is an array of 8. Each element defines the number of
* Tx descriptors that can be associated with each corresponding FIFO.
+ * intr_type: This defines the type of interrupt. The values can be 0(INTA),
+ * 1(MSI), 2(MSI_X). Default value is '0(INTA)'
+ * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
+ * Possible values are '1' for enable and '0' for disable. Default is '0'
+ * lro_max_pkts: This parameter defines the maximum number of packets that
+ * can be aggregated as a single large packet
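+ *
+ * Illustrative usage (parameter names as documented above; the values are
+ * examples only): modprobe s2io intr_type=2 lro=1 lro_max_pkts=20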
************************************************************************/
#include <linux/config.h>
@@ -70,7 +77,7 @@
#include "s2io.h"
#include "s2io-regs.h"
-#define DRV_VERSION "2.0.11.2"
+#define DRV_VERSION "2.0.14.2"
/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
@@ -106,18 +113,14 @@ static inline int RXD_IS_UP2DT(RxD_t *rxdp)
#define LOW 2
static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
{
- int level = 0;
mac_info_t *mac_control;
mac_control = &sp->mac_control;
- if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
- level = LOW;
- if (rxb_size <= rxd_count[sp->rxd_mode]) {
- level = PANIC;
- }
- }
-
- return level;
+ if (rxb_size <= rxd_count[sp->rxd_mode])
+ return PANIC;
+ else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
+ return LOW;
+ return 0;
}
/* Ethtool related variables and Macros. */
@@ -136,7 +139,11 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
{"tmac_mcst_frms"},
{"tmac_bcst_frms"},
{"tmac_pause_ctrl_frms"},
+ {"tmac_ttl_octets"},
+ {"tmac_ucst_frms"},
+ {"tmac_nucst_frms"},
{"tmac_any_err_frms"},
+ {"tmac_ttl_less_fb_octets"},
{"tmac_vld_ip_octets"},
{"tmac_vld_ip"},
{"tmac_drop_ip"},
@@ -151,13 +158,27 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
{"rmac_vld_mcst_frms"},
{"rmac_vld_bcst_frms"},
{"rmac_in_rng_len_err_frms"},
+ {"rmac_out_rng_len_err_frms"},
{"rmac_long_frms"},
{"rmac_pause_ctrl_frms"},
+ {"rmac_unsup_ctrl_frms"},
+ {"rmac_ttl_octets"},
+ {"rmac_accepted_ucst_frms"},
+ {"rmac_accepted_nucst_frms"},
{"rmac_discarded_frms"},
+ {"rmac_drop_events"},
+ {"rmac_ttl_less_fb_octets"},
+ {"rmac_ttl_frms"},
{"rmac_usized_frms"},
{"rmac_osized_frms"},
{"rmac_frag_frms"},
{"rmac_jabber_frms"},
+ {"rmac_ttl_64_frms"},
+ {"rmac_ttl_65_127_frms"},
+ {"rmac_ttl_128_255_frms"},
+ {"rmac_ttl_256_511_frms"},
+ {"rmac_ttl_512_1023_frms"},
+ {"rmac_ttl_1024_1518_frms"},
{"rmac_ip"},
{"rmac_ip_octets"},
{"rmac_hdr_err_ip"},
@@ -166,12 +187,82 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
{"rmac_tcp"},
{"rmac_udp"},
{"rmac_err_drp_udp"},
+ {"rmac_xgmii_err_sym"},
+ {"rmac_frms_q0"},
+ {"rmac_frms_q1"},
+ {"rmac_frms_q2"},
+ {"rmac_frms_q3"},
+ {"rmac_frms_q4"},
+ {"rmac_frms_q5"},
+ {"rmac_frms_q6"},
+ {"rmac_frms_q7"},
+ {"rmac_full_q0"},
+ {"rmac_full_q1"},
+ {"rmac_full_q2"},
+ {"rmac_full_q3"},
+ {"rmac_full_q4"},
+ {"rmac_full_q5"},
+ {"rmac_full_q6"},
+ {"rmac_full_q7"},
{"rmac_pause_cnt"},
+ {"rmac_xgmii_data_err_cnt"},
+ {"rmac_xgmii_ctrl_err_cnt"},
{"rmac_accepted_ip"},
{"rmac_err_tcp"},
+ {"rd_req_cnt"},
+ {"new_rd_req_cnt"},
+ {"new_rd_req_rtry_cnt"},
+ {"rd_rtry_cnt"},
+ {"wr_rtry_rd_ack_cnt"},
+ {"wr_req_cnt"},
+ {"new_wr_req_cnt"},
+ {"new_wr_req_rtry_cnt"},
+ {"wr_rtry_cnt"},
+ {"wr_disc_cnt"},
+ {"rd_rtry_wr_ack_cnt"},
+ {"txp_wr_cnt"},
+ {"txd_rd_cnt"},
+ {"txd_wr_cnt"},
+ {"rxd_rd_cnt"},
+ {"rxd_wr_cnt"},
+ {"txf_rd_cnt"},
+ {"rxf_wr_cnt"},
+ {"rmac_ttl_1519_4095_frms"},
+ {"rmac_ttl_4096_8191_frms"},
+ {"rmac_ttl_8192_max_frms"},
+ {"rmac_ttl_gt_max_frms"},
+ {"rmac_osized_alt_frms"},
+ {"rmac_jabber_alt_frms"},
+ {"rmac_gt_max_alt_frms"},
+ {"rmac_vlan_frms"},
+ {"rmac_len_discard"},
+ {"rmac_fcs_discard"},
+ {"rmac_pf_discard"},
+ {"rmac_da_discard"},
+ {"rmac_red_discard"},
+ {"rmac_rts_discard"},
+ {"rmac_ingm_full_discard"},
+ {"link_fault_cnt"},
{"\n DRIVER STATISTICS"},
{"single_bit_ecc_errs"},
{"double_bit_ecc_errs"},
+ {"parity_err_cnt"},
+ {"serious_err_cnt"},
+ {"soft_reset_cnt"},
+ {"fifo_full_cnt"},
+ {"ring_full_cnt"},
+ ("alarm_transceiver_temp_high"),
+ ("alarm_transceiver_temp_low"),
+ ("alarm_laser_bias_current_high"),
+ ("alarm_laser_bias_current_low"),
+ ("alarm_laser_output_power_high"),
+ ("alarm_laser_output_power_low"),
+ ("warn_transceiver_temp_high"),
+ ("warn_transceiver_temp_low"),
+ ("warn_laser_bias_current_high"),
+ ("warn_laser_bias_current_low"),
+ ("warn_laser_output_power_high"),
+ ("warn_laser_output_power_low"),
("lro_aggregated_pkts"),
("lro_flush_both_count"),
("lro_out_of_sequence_pkts"),
@@ -220,9 +311,7 @@ static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
* the XAUI.
*/
-#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
#define END_SIGN 0x0
-
static const u64 herc_act_dtx_cfg[] = {
/* Set address */
0x8000051536750000ULL, 0x80000515367500E0ULL,
@@ -244,37 +333,19 @@ static const u64 herc_act_dtx_cfg[] = {
END_SIGN
};
-static const u64 xena_mdio_cfg[] = {
- /* Reset PMA PLL */
- 0xC001010000000000ULL, 0xC0010100000000E0ULL,
- 0xC0010100008000E4ULL,
- /* Remove Reset from PMA PLL */
- 0xC001010000000000ULL, 0xC0010100000000E0ULL,
- 0xC0010100000000E4ULL,
- END_SIGN
-};
-
static const u64 xena_dtx_cfg[] = {
+ /* Set address */
0x8000051500000000ULL, 0x80000515000000E0ULL,
- 0x80000515D93500E4ULL, 0x8001051500000000ULL,
- 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
- 0x8002051500000000ULL, 0x80020515000000E0ULL,
- 0x80020515F21000E4ULL,
- /* Set PADLOOPBACKN */
- 0x8002051500000000ULL, 0x80020515000000E0ULL,
- 0x80020515B20000E4ULL, 0x8003051500000000ULL,
- 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
- 0x8004051500000000ULL, 0x80040515000000E0ULL,
- 0x80040515B20000E4ULL, 0x8005051500000000ULL,
- 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
- SWITCH_SIGN,
- /* Remove PADLOOPBACKN */
+ /* Write data */
+ 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
+ /* Set address */
+ 0x8001051500000000ULL, 0x80010515000000E0ULL,
+ /* Write data */
+ 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
+ /* Set address */
0x8002051500000000ULL, 0x80020515000000E0ULL,
- 0x80020515F20000E4ULL, 0x8003051500000000ULL,
- 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
- 0x8004051500000000ULL, 0x80040515000000E0ULL,
- 0x80040515F20000E4ULL, 0x8005051500000000ULL,
- 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
+ /* Write data */
+ 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
END_SIGN
};
@@ -303,15 +374,15 @@ static const u64 fix_mac[] = {
/* Module Loadable parameters. */
static unsigned int tx_fifo_num = 1;
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
- {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
+ {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_num = 1;
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
- {[0 ...(MAX_RX_RINGS - 1)] = 0 };
+ {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };
static unsigned int rx_ring_mode = 1;
static unsigned int use_continuous_tx_intrs = 1;
-static unsigned int rmac_pause_time = 65535;
+static unsigned int rmac_pause_time = 0x100;
static unsigned int mc_pause_threshold_q0q3 = 187;
static unsigned int mc_pause_threshold_q4q7 = 187;
static unsigned int shared_splits;
@@ -549,11 +620,6 @@ static int init_shared_mem(struct s2io_nic *nic)
rx_blocks->block_dma_addr +
(rxd_size[nic->rxd_mode] * l);
}
-
- mac_control->rings[i].rx_blocks[j].block_virt_addr =
- tmp_v_addr;
- mac_control->rings[i].rx_blocks[j].block_dma_addr =
- tmp_p_addr;
}
/* Interlinking all Rx Blocks */
for (j = 0; j < blk_cnt; j++) {
@@ -772,7 +838,21 @@ static int s2io_verify_pci_mode(nic_t *nic)
return mode;
}
+#define NEC_VENID 0x1033
+#define NEC_DEVID 0x0125
+static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
+{
+ struct pci_dev *tdev = NULL;
+ while ((tdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
+ if ((tdev->vendor == NEC_VENID) && (tdev->device == NEC_DEVID)){
+ if (tdev->bus == s2io_pdev->bus->parent)
+ return 1;
+ }
+ }
+ return 0;
+}
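An aside on the scan above, not part of the patch: pci_find_device() is the older, non-refcounted device iterator. On kernels that provide the refcounted API, an equivalent scan could look like the hypothetical sketch below (pci_get_device()/pci_dev_put() semantics assumed):

	/* Hypothetical refcounted variant of the NEC bridge scan. */
	static int s2io_on_nec_bridge_refcounted(struct pci_dev *s2io_pdev)
	{
		struct pci_dev *tdev = NULL;

		while ((tdev = pci_get_device(NEC_VENID, NEC_DEVID, tdev)) != NULL) {
			if (tdev->bus == s2io_pdev->bus->parent) {
				pci_dev_put(tdev);	/* drop the reference we hold */
				return 1;
			}
		}
		return 0;
	}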
+static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/**
* s2io_print_pci_mode -
*/
@@ -789,6 +869,14 @@ static int s2io_print_pci_mode(nic_t *nic)
if ( val64 & PCI_MODE_UNKNOWN_MODE)
return -1; /* Unknown PCI mode */
+ config->bus_speed = bus_speed[mode];
+
+ if (s2io_on_nec_bridge(nic->pdev)) {
+ DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
+ nic->dev->name);
+ return mode;
+ }
+
if (val64 & PCI_MODE_32_BITS) {
DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
} else {
@@ -798,35 +886,27 @@ static int s2io_print_pci_mode(nic_t *nic)
switch(mode) {
case PCI_MODE_PCI_33:
DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
- config->bus_speed = 33;
break;
case PCI_MODE_PCI_66:
DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
- config->bus_speed = 133;
break;
case PCI_MODE_PCIX_M1_66:
DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
- config->bus_speed = 133; /* Herc doubles the clock rate */
break;
case PCI_MODE_PCIX_M1_100:
DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
- config->bus_speed = 200;
break;
case PCI_MODE_PCIX_M1_133:
DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
- config->bus_speed = 266;
break;
case PCI_MODE_PCIX_M2_66:
DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
- config->bus_speed = 133;
break;
case PCI_MODE_PCIX_M2_100:
DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
- config->bus_speed = 200;
break;
case PCI_MODE_PCIX_M2_133:
DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
- config->bus_speed = 266;
break;
default:
return -1; /* Unsupported bus speed */
@@ -854,7 +934,7 @@ static int init_nic(struct s2io_nic *nic)
int i, j;
mac_info_t *mac_control;
struct config_param *config;
- int mdio_cnt = 0, dtx_cnt = 0;
+ int dtx_cnt = 0;
unsigned long long mem_share;
int mem_size;
@@ -901,20 +981,6 @@ static int init_nic(struct s2io_nic *nic)
val64 = dev->mtu;
writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
- /*
- * Configuring the XAUI Interface of Xena.
- * ***************************************
- * To Configure the Xena's XAUI, one has to write a series
- * of 64 bit values into two registers in a particular
- * sequence. Hence a macro 'SWITCH_SIGN' has been defined
- * which will be defined in the array of configuration values
- * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
- * to switch writing from one regsiter to another. We continue
- * writing these values until we encounter the 'END_SIGN' macro.
- * For example, After making a series of 21 writes into
- * dtx_control register the 'SWITCH_SIGN' appears and hence we
- * start writing into mdio_control until we encounter END_SIGN.
- */
if (nic->device_type & XFRAME_II_DEVICE) {
while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
@@ -924,35 +990,11 @@ static int init_nic(struct s2io_nic *nic)
dtx_cnt++;
}
} else {
- while (1) {
- dtx_cfg:
- while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
- if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
- dtx_cnt++;
- goto mdio_cfg;
- }
- SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
- &bar0->dtx_control, UF);
- val64 = readq(&bar0->dtx_control);
- dtx_cnt++;
- }
- mdio_cfg:
- while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
- if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
- mdio_cnt++;
- goto dtx_cfg;
- }
- SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
- &bar0->mdio_control, UF);
- val64 = readq(&bar0->mdio_control);
- mdio_cnt++;
- }
- if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
- (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
- break;
- } else {
- goto dtx_cfg;
- }
+ while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
+ SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
+ &bar0->dtx_control, UF);
+ val64 = readq(&bar0->dtx_control);
+ dtx_cnt++;
}
}
@@ -994,11 +1036,6 @@ static int init_nic(struct s2io_nic *nic)
}
}
- /* Enable Tx FIFO partition 0. */
- val64 = readq(&bar0->tx_fifo_partition_0);
- val64 |= BIT(0); /* To enable the FIFO partition. */
- writeq(val64, &bar0->tx_fifo_partition_0);
-
/*
* Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
* SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
@@ -1177,6 +1214,11 @@ static int init_nic(struct s2io_nic *nic)
break;
}
+ /* Enable Tx FIFO partition 0. */
+ val64 = readq(&bar0->tx_fifo_partition_0);
+ val64 |= (TX_FIFO_PARTITION_EN);
+ writeq(val64, &bar0->tx_fifo_partition_0);
+
/* Filling the Rx round robin registers as per the
* number of Rings and steering based on QoS.
*/
@@ -1545,19 +1587,26 @@ static int init_nic(struct s2io_nic *nic)
val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
writeq(val64, &bar0->pic_control);
+ if (nic->config.bus_speed == 266) {
+ writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
+ writeq(0x0, &bar0->read_retry_delay);
+ writeq(0x0, &bar0->write_retry_delay);
+ }
+
/*
* Programming the Herc to split every write transaction
* that does not start on an ADB to reduce disconnects.
*/
if (nic->device_type == XFRAME_II_DEVICE) {
- val64 = WREQ_SPLIT_MASK_SET_MASK(255);
- writeq(val64, &bar0->wreq_split_mask);
- }
-
- /* Setting Link stability period to 64 ms */
- if (nic->device_type == XFRAME_II_DEVICE) {
- val64 = MISC_LINK_STABILITY_PRD(3);
+ val64 = EXT_REQ_EN | MISC_LINK_STABILITY_PRD(3);
writeq(val64, &bar0->misc_control);
+ val64 = readq(&bar0->pic_control2);
+ val64 &= ~(BIT(13)|BIT(14)|BIT(15));
+ writeq(val64, &bar0->pic_control2);
+ }
+ if (strstr(nic->product_name, "CX4")) {
+ val64 = TMAC_AVG_IPG(0x17);
+ writeq(val64, &bar0->tmac_avg_ipg);
}
return SUCCESS;
@@ -1948,6 +1997,10 @@ static int start_nic(struct s2io_nic *nic)
val64 |= PRC_CTRL_RC_ENABLED;
else
val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
+ if (nic->device_type == XFRAME_II_DEVICE)
+ val64 |= PRC_CTRL_GROUP_READS;
+ val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
+ val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
writeq(val64, &bar0->prc_ctrl_n[i]);
}
@@ -2018,6 +2071,13 @@ static int start_nic(struct s2io_nic *nic)
val64 |= ADAPTER_EOI_TX_ON;
writeq(val64, &bar0->adapter_control);
+ if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
+ /*
+ * Don't see link state interrupts initially on some switches,
+ * so directly scheduling the link state task here.
+ */
+ schedule_work(&nic->set_link_task);
+ }
/* SXE-002: Initialize link and activity LED */
subid = nic->pdev->subsystem_device;
if (((subid & 0xFF) >= 0x07) &&
@@ -2029,12 +2089,6 @@ static int start_nic(struct s2io_nic *nic)
writeq(val64, (void __iomem *)bar0 + 0x2700);
}
- /*
- * Don't see link state interrupts on certain switches, so
- * directly scheduling a link state task from here.
- */
- schedule_work(&nic->set_link_task);
-
return SUCCESS;
}
/**
@@ -2134,7 +2188,7 @@ static void stop_nic(struct s2io_nic *nic)
{
XENA_dev_config_t __iomem *bar0 = nic->bar0;
register u64 val64 = 0;
- u16 interruptible, i;
+ u16 interruptible;
mac_info_t *mac_control;
struct config_param *config;
@@ -2147,12 +2201,10 @@ static void stop_nic(struct s2io_nic *nic)
interruptible |= TX_MAC_INTR | RX_MAC_INTR;
en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
- /* Disable PRCs */
- for (i = 0; i < config->rx_ring_num; i++) {
- val64 = readq(&bar0->prc_ctrl_n[i]);
- val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
- writeq(val64, &bar0->prc_ctrl_n[i]);
- }
+ /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
+ val64 = readq(&bar0->adapter_control);
+ val64 &= ~(ADAPTER_CNTL_EN);
+ writeq(val64, &bar0->adapter_control);
}
static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
@@ -2231,13 +2283,12 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
atomic_read(&nic->rx_bufs_left[ring_no]);
+ block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
+ off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
while (alloc_tab < alloc_cnt) {
block_no = mac_control->rings[ring_no].rx_curr_put_info.
block_index;
- block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
- block_index;
off = mac_control->rings[ring_no].rx_curr_put_info.offset;
- off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
rxdp = mac_control->rings[ring_no].
rx_blocks[block_no].rxds[off].virt_addr;
@@ -2307,9 +2358,9 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
memset(rxdp, 0, sizeof(RxD1_t));
skb_reserve(skb, NET_IP_ALIGN);
((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
- (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
- rxdp->Control_2 &= (~MASK_BUFFER0_SIZE_1);
- rxdp->Control_2 |= SET_BUFFER0_SIZE_1(size);
+ (nic->pdev, skb->data, size - NET_IP_ALIGN,
+ PCI_DMA_FROMDEVICE);
+ rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
} else if (nic->rxd_mode >= RXD_MODE_3A) {
/*
@@ -2516,7 +2567,7 @@ static int s2io_poll(struct net_device *dev, int *budget)
mac_info_t *mac_control;
struct config_param *config;
XENA_dev_config_t __iomem *bar0 = nic->bar0;
- u64 val64;
+ u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
int i;
atomic_inc(&nic->isr_cnt);
@@ -2528,8 +2579,8 @@ static int s2io_poll(struct net_device *dev, int *budget)
nic->pkts_to_process = dev->quota;
org_pkts_to_process = nic->pkts_to_process;
- val64 = readq(&bar0->rx_traffic_int);
writeq(val64, &bar0->rx_traffic_int);
+ val64 = readl(&bar0->rx_traffic_int);
for (i = 0; i < config->rx_ring_num; i++) {
rx_intr_handler(&mac_control->rings[i]);
@@ -2554,7 +2605,8 @@ static int s2io_poll(struct net_device *dev, int *budget)
}
}
/* Re enable the Rx interrupts. */
- en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
+ writeq(0x0, &bar0->rx_traffic_mask);
+ val64 = readl(&bar0->rx_traffic_mask);
atomic_dec(&nic->isr_cnt);
return 0;
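A note on the re-enable path above: the driver now unmasks Rx interrupts by clearing rx_traffic_mask and immediately reading the register back; the read-back is the usual way to flush a posted MMIO write so the unmask is guaranteed to have reached the device before polling ends. A minimal sketch of the pattern, using the driver's own register layout:

	static void s2io_unmask_rx_traffic(XENA_dev_config_t __iomem *bar0)
	{
		writeq(0x0, &bar0->rx_traffic_mask);	/* unmask Rx traffic interrupts */
		(void) readl(&bar0->rx_traffic_mask);	/* read back to flush the posted write */
	}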
@@ -2666,6 +2718,7 @@ static void rx_intr_handler(ring_info_t *ring_data)
((RxD3_t*)rxdp)->Buffer2_ptr,
dev->mtu, PCI_DMA_FROMDEVICE);
}
+ prefetch(skb->data);
rx_osm_handler(ring_data, rxdp);
get_info.offset++;
ring_data->rx_curr_get_info.offset = get_info.offset;
@@ -2737,6 +2790,10 @@ static void tx_intr_handler(fifo_info_t *fifo_data)
if (txdlp->Control_1 & TXD_T_CODE) {
unsigned long long err;
err = txdlp->Control_1 & TXD_T_CODE;
+ if (err & 0x1) {
+ nic->mac_control.stats_info->sw_stat.
+ parity_err_cnt++;
+ }
if ((err >> 48) == 0xA) {
DBG_PRINT(TX_DBG, "TxD returned due \
to loss of link\n");
@@ -2760,7 +2817,8 @@ to loss of link\n");
dev_kfree_skb_irq(skb);
get_info.offset++;
- get_info.offset %= get_info.fifo_len + 1;
+ if (get_info.offset == get_info.fifo_len + 1)
+ get_info.offset = 0;
txdlp = (TxD_t *) fifo_data->list_info
[get_info.offset].list_virt_addr;
fifo_data->tx_curr_get_info.offset =
@@ -2774,6 +2832,256 @@ to loss of link\n");
}
/**
+ * s2io_mdio_write - Function to write into MDIO registers
+ * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
+ * @addr : address value
+ * @value : data value
+ * @dev : pointer to net_device structure
+ * Description:
+ * This function is used to write values into the MDIO registers.
+ * Return value:
+ * NONE
+ */
+static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
+{
+ u64 val64 = 0x0;
+ nic_t *sp = dev->priv;
+ XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;
+
+ /* address transaction */
+ val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
+ | MDIO_MMD_DEV_ADDR(mmd_type)
+ | MDIO_MMS_PRT_ADDR(0x0);
+ writeq(val64, &bar0->mdio_control);
+ val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+ writeq(val64, &bar0->mdio_control);
+ udelay(100);
+
+ /* Data transaction */
+ val64 = 0x0;
+ val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
+ | MDIO_MMD_DEV_ADDR(mmd_type)
+ | MDIO_MMS_PRT_ADDR(0x0)
+ | MDIO_MDIO_DATA(value)
+ | MDIO_OP(MDIO_OP_WRITE_TRANS);
+ writeq(val64, &bar0->mdio_control);
+ val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+ writeq(val64, &bar0->mdio_control);
+ udelay(100);
+
+ val64 = 0x0;
+ val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
+ | MDIO_MMD_DEV_ADDR(mmd_type)
+ | MDIO_MMS_PRT_ADDR(0x0)
+ | MDIO_OP(MDIO_OP_READ_TRANS);
+ writeq(val64, &bar0->mdio_control);
+ val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+ writeq(val64, &bar0->mdio_control);
+ udelay(100);
+
+}
+
+/**
+ * s2io_mdio_read - Function to read from MDIO registers
+ * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
+ * @addr : address value
+ * @dev : pointer to net_device structure
+ * Description:
+ * This function is used to read values from the MDIO registers.
+ * Return value:
+ * NONE
+ */
+static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
+{
+ u64 val64 = 0x0;
+ u64 rval64 = 0x0;
+ nic_t *sp = dev->priv;
+ XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;
+
+ /* address transaction */
+ val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
+ | MDIO_MMD_DEV_ADDR(mmd_type)
+ | MDIO_MMS_PRT_ADDR(0x0);
+ writeq(val64, &bar0->mdio_control);
+ val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+ writeq(val64, &bar0->mdio_control);
+ udelay(100);
+
+ /* Data transaction */
+ val64 = 0x0;
+ val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
+ | MDIO_MMD_DEV_ADDR(mmd_type)
+ | MDIO_MMS_PRT_ADDR(0x0)
+ | MDIO_OP(MDIO_OP_READ_TRANS);
+ writeq(val64, &bar0->mdio_control);
+ val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+ writeq(val64, &bar0->mdio_control);
+ udelay(100);
+
+ /* Read the value from regs */
+ rval64 = readq(&bar0->mdio_control);
+ rval64 = rval64 & 0xFFFF0000;
+ rval64 = rval64 >> 16;
+ return rval64;
+}
+/**
+ * s2io_chk_xpak_counter - Function to check the status of the xpak counters
+ * @counter : counter value to be updated
+ * @regs_stat : register tracking consecutive occurrences of each alarm
+ * @index : index of the 2-bit field for this alarm within @regs_stat
+ * @flag : flag to indicate the status
+ * @type : counter type
+ * Description:
+ * This function checks the status of the XPAK counter values.
+ * Return value:
+ * NONE
+ */
+
+static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
+{
+ u64 mask = 0x3;
+ u64 val64;
+ int i;
+ for(i = 0; i <index; i++)
+ mask = mask << 0x2;
+
+ if(flag > 0)
+ {
+ *counter = *counter + 1;
+ val64 = *regs_stat & mask;
+ val64 = val64 >> (index * 0x2);
+ val64 = val64 + 1;
+ if(val64 == 3)
+ {
+ switch(type)
+ {
+ case 1:
+ DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
+ "service. Excessive temperatures may "
+ "result in premature transceiver "
+ "failure \n");
+ break;
+ case 2:
+ DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
+ "service Excessive bias currents may "
+ "indicate imminent laser diode "
+ "failure \n");
+ break;
+ case 3:
+ DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
+ "service Excessive laser output "
+ "power may saturate far-end "
+ "receiver\n");
+ break;
+ default:
+ DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
+ "type \n");
+ }
+ val64 = 0x0;
+ }
+ val64 = val64 << (index * 0x2);
+ *regs_stat = (*regs_stat & (~mask)) | (val64);
+
+ } else {
+ *regs_stat = *regs_stat & (~mask);
+ }
+}
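A minimal sketch of the bookkeeping done by s2io_chk_xpak_counter() above, under the assumption (taken from the code itself) that xpak_regs_stat packs one 2-bit consecutive-hit field per alarm index: the field is bumped while the alarm flag stays set, reset to zero once it reaches three (when the warning is printed), and cleared when the flag drops. The helper names below are hypothetical:

	/* Hypothetical helpers mirroring the 2-bit-per-index packing in xpak_regs_stat. */
	static inline u64 xpak_streak_get(u64 regs_stat, u32 index)
	{
		return (regs_stat >> (index * 2)) & 0x3;
	}

	static inline u64 xpak_streak_set(u64 regs_stat, u32 index, u64 streak)
	{
		u64 mask = 0x3ULL << (index * 2);

		return (regs_stat & ~mask) | ((streak & 0x3) << (index * 2));
	}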
+
+/**
+ * s2io_updt_xpak_counter - Function to update the xpak counters
+ * @dev : pointer to net_device struct
+ * Description:
+ * This function updates the status of the XPAK counter values.
+ * Return value:
+ * NONE
+ */
+static void s2io_updt_xpak_counter(struct net_device *dev)
+{
+ u16 flag = 0x0;
+ u16 type = 0x0;
+ u16 val16 = 0x0;
+ u64 val64 = 0x0;
+ u64 addr = 0x0;
+
+ nic_t *sp = dev->priv;
+ StatInfo_t *stat_info = sp->mac_control.stats_info;
+
+ /* Check the communication with the MDIO slave */
+ addr = 0x0000;
+ val64 = 0x0;
+ val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+ if((val64 == 0xFFFF) || (val64 == 0x0000))
+ {
+ DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
+ "Returned %llx\n", (unsigned long long)val64);
+ return;
+ }
+
+ /* Check for the expected value of 0x2040 at PMA address 0x0000 */
+ if(val64 != 0x2040)
+ {
+ DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
+ DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
+ (unsigned long long)val64);
+ return;
+ }
+
+ /* Loading the DOM register to MDIO register */
+ addr = 0xA100;
+ s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
+ val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+
+ /* Reading the Alarm flags */
+ addr = 0xA070;
+ val64 = 0x0;
+ val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+
+ flag = CHECKBIT(val64, 0x7);
+ type = 1;
+ s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
+ &stat_info->xpak_stat.xpak_regs_stat,
+ 0x0, flag, type);
+
+ if(CHECKBIT(val64, 0x6))
+ stat_info->xpak_stat.alarm_transceiver_temp_low++;
+
+ flag = CHECKBIT(val64, 0x3);
+ type = 2;
+ s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
+ &stat_info->xpak_stat.xpak_regs_stat,
+ 0x2, flag, type);
+
+ if(CHECKBIT(val64, 0x2))
+ stat_info->xpak_stat.alarm_laser_bias_current_low++;
+
+ flag = CHECKBIT(val64, 0x1);
+ type = 3;
+ s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
+ &stat_info->xpak_stat.xpak_regs_stat,
+ 0x4, flag, type);
+
+ if(CHECKBIT(val64, 0x0))
+ stat_info->xpak_stat.alarm_laser_output_power_low++;
+
+ /* Reading the Warning flags */
+ addr = 0xA074;
+ val64 = 0x0;
+ val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+
+ if(CHECKBIT(val64, 0x7))
+ stat_info->xpak_stat.warn_transceiver_temp_high++;
+
+ if(CHECKBIT(val64, 0x6))
+ stat_info->xpak_stat.warn_transceiver_temp_low++;
+
+ if(CHECKBIT(val64, 0x3))
+ stat_info->xpak_stat.warn_laser_bias_current_high++;
+
+ if(CHECKBIT(val64, 0x2))
+ stat_info->xpak_stat.warn_laser_bias_current_low++;
+
+ if(CHECKBIT(val64, 0x1))
+ stat_info->xpak_stat.warn_laser_output_power_high++;
+
+ if(CHECKBIT(val64, 0x0))
+ stat_info->xpak_stat.warn_laser_output_power_low++;
+}
+
+/**
* alarm_intr_handler - Alarm Interrupt handler
* @nic: device private variable
* Description: If the interrupt was neither because of Rx packet or Tx
@@ -2790,6 +3098,18 @@ static void alarm_intr_handler(struct s2io_nic *nic)
struct net_device *dev = (struct net_device *) nic->dev;
XENA_dev_config_t __iomem *bar0 = nic->bar0;
register u64 val64 = 0, err_reg = 0;
+ u64 cnt;
+ int i;
+ nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
+ /* Handling the XPAK counters update */
+ if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
+ /* waiting for an hour */
+ nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
+ } else {
+ s2io_updt_xpak_counter(dev);
+ /* reset the count to zero */
+ nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
+ }
/* Handling link status change error Intr */
if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
@@ -2816,6 +3136,8 @@ static void alarm_intr_handler(struct s2io_nic *nic)
MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
netif_stop_queue(dev);
schedule_work(&nic->rst_timer_task);
+ nic->mac_control.stats_info->sw_stat.
+ soft_reset_cnt++;
}
}
} else {
@@ -2827,11 +3149,13 @@ static void alarm_intr_handler(struct s2io_nic *nic)
/* In case of a serious error, the device will be Reset. */
val64 = readq(&bar0->serr_source);
if (val64 & SERR_SOURCE_ANY) {
+ nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
(unsigned long long)val64);
netif_stop_queue(dev);
schedule_work(&nic->rst_timer_task);
+ nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
}
/*
@@ -2849,6 +3173,35 @@ static void alarm_intr_handler(struct s2io_nic *nic)
ac = readq(&bar0->adapter_control);
schedule_work(&nic->set_link_task);
}
+ /* Check for data parity error */
+ val64 = readq(&bar0->pic_int_status);
+ if (val64 & PIC_INT_GPIO) {
+ val64 = readq(&bar0->gpio_int_reg);
+ if (val64 & GPIO_INT_REG_DP_ERR_INT) {
+ nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
+ schedule_work(&nic->rst_timer_task);
+ nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
+ }
+ }
+
+ /* Check for ring full counter */
+ if (nic->device_type & XFRAME_II_DEVICE) {
+ val64 = readq(&bar0->ring_bump_counter1);
+ for (i=0; i<4; i++) {
+ cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
+ cnt >>= 64 - ((i+1)*16);
+ nic->mac_control.stats_info->sw_stat.ring_full_cnt
+ += cnt;
+ }
+
+ val64 = readq(&bar0->ring_bump_counter2);
+ for (i=0; i<4; i++) {
+ cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
+ cnt >>= 64 - ((i+1)*16);
+ nic->mac_control.stats_info->sw_stat.ring_full_cnt
+ += cnt;
+ }
+ }
/* Other type of interrupts are not being handled now, TODO */
}
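On the ring-full accounting above: each ring_bump_counter register carries four 16-bit per-ring counts, with field 0 in the most significant position, which is why the code masks with vBIT(0xFFFF, i*16, 16) and then shifts right by 64 - (i+1)*16. A hypothetical helper expressing the same extraction:

	/* Extract 16-bit field i (0..3) of a ring bump counter register,
	 * numbering the fields from the most significant end. */
	static inline u64 ring_bump_field(u64 reg, int i)
	{
		return (reg >> (64 - (i + 1) * 16)) & 0xFFFF;
	}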
@@ -2864,23 +3217,26 @@ static void alarm_intr_handler(struct s2io_nic *nic)
* SUCCESS on success and FAILURE on failure.
*/
-static int wait_for_cmd_complete(nic_t * sp)
+static int wait_for_cmd_complete(void *addr, u64 busy_bit)
{
- XENA_dev_config_t __iomem *bar0 = sp->bar0;
int ret = FAILURE, cnt = 0;
u64 val64;
while (TRUE) {
- val64 = readq(&bar0->rmac_addr_cmd_mem);
- if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
+ val64 = readq(addr);
+ if (!(val64 & busy_bit)) {
ret = SUCCESS;
break;
}
- msleep(50);
+
+ if(in_interrupt())
+ mdelay(50);
+ else
+ msleep(50);
+
if (cnt++ > 10)
break;
}
-
return ret;
}
@@ -2919,6 +3275,9 @@ static void s2io_reset(nic_t * sp)
* PCI write to sw_reset register is done by this time.
*/
msleep(250);
+ if (strstr(sp->product_name, "CX4")) {
+ msleep(750);
+ }
/* Restore the PCI state saved during initialization. */
pci_restore_state(sp->pdev);
@@ -3137,7 +3496,7 @@ static void restore_xmsi_data(nic_t *nic)
u64 val64;
int i;
- for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
+ for (i=0; i< nic->avail_msix_vectors; i++) {
writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
writeq(nic->msix_info[i].data, &bar0->xmsi_data);
val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
@@ -3156,7 +3515,7 @@ static void store_xmsi_data(nic_t *nic)
int i;
/* Store and display */
- for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
+ for (i=0; i< nic->avail_msix_vectors; i++) {
val64 = (BIT(15) | vBIT(i, 26, 6));
writeq(val64, &bar0->xmsi_access);
if (wait_for_msix_trans(nic, i)) {
@@ -3284,15 +3643,24 @@ static int s2io_enable_msi_x(nic_t *nic)
writeq(tx_mat, &bar0->tx_mat0_n[7]);
}
+ nic->avail_msix_vectors = 0;
ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
+ /* We fail init if error or we get less vectors than min required */
+ if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
+ nic->avail_msix_vectors = ret;
+ ret = pci_enable_msix(nic->pdev, nic->entries, ret);
+ }
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
kfree(nic->entries);
kfree(nic->s2io_entries);
nic->entries = NULL;
nic->s2io_entries = NULL;
+ nic->avail_msix_vectors = 0;
return -ENOMEM;
}
+ if (!nic->avail_msix_vectors)
+ nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
/*
* To enable MSI-X, MSI also needs to be enabled, due to a bug
@@ -3325,8 +3693,6 @@ static int s2io_open(struct net_device *dev)
{
nic_t *sp = dev->priv;
int err = 0;
- int i;
- u16 msi_control; /* Temp variable */
/*
* Make sure you have link off by default every time
@@ -3336,11 +3702,14 @@ static int s2io_open(struct net_device *dev)
sp->last_link_state = 0;
/* Initialize H/W and enable interrupts */
- if (s2io_card_up(sp)) {
+ err = s2io_card_up(sp);
+ if (err) {
DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
dev->name);
- err = -ENODEV;
- goto hw_init_failed;
+ if (err == -ENODEV)
+ goto hw_init_failed;
+ else
+ goto hw_enable_failed;
}
/* Store the values of the MSIX table in the nic_t structure */
@@ -3357,6 +3726,8 @@ failed\n", dev->name);
}
}
if (sp->intr_type == MSI_X) {
+ int i;
+
for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
sprintf(sp->desc1, "%s:MSI-X-%d-TX",
@@ -3409,24 +3780,26 @@ setting_mac_address_failed:
isr_registration_failed:
del_timer_sync(&sp->alarm_timer);
if (sp->intr_type == MSI_X) {
- if (sp->device_type == XFRAME_II_DEVICE) {
- for (i=1; (sp->s2io_entries[i].in_use ==
- MSIX_REGISTERED_SUCCESS); i++) {
- int vector = sp->entries[i].vector;
- void *arg = sp->s2io_entries[i].arg;
+ int i;
+ u16 msi_control; /* Temp variable */
- free_irq(vector, arg);
- }
- pci_disable_msix(sp->pdev);
+ for (i=1; (sp->s2io_entries[i].in_use ==
+ MSIX_REGISTERED_SUCCESS); i++) {
+ int vector = sp->entries[i].vector;
+ void *arg = sp->s2io_entries[i].arg;
- /* Temp */
- pci_read_config_word(sp->pdev, 0x42, &msi_control);
- msi_control &= 0xFFFE; /* Disable MSI */
- pci_write_config_word(sp->pdev, 0x42, msi_control);
+ free_irq(vector, arg);
}
+ pci_disable_msix(sp->pdev);
+
+ /* Temp */
+ pci_read_config_word(sp->pdev, 0x42, &msi_control);
+ msi_control &= 0xFFFE; /* Disable MSI */
+ pci_write_config_word(sp->pdev, 0x42, msi_control);
}
else if (sp->intr_type == MSI)
pci_disable_msi(sp->pdev);
+hw_enable_failed:
s2io_reset(sp);
hw_init_failed:
if (sp->intr_type == MSI_X) {
@@ -3454,35 +3827,12 @@ hw_init_failed:
static int s2io_close(struct net_device *dev)
{
nic_t *sp = dev->priv;
- int i;
- u16 msi_control;
flush_scheduled_work();
netif_stop_queue(dev);
/* Reset card, kill tasklet and free Tx and Rx buffers. */
- s2io_card_down(sp);
-
- if (sp->intr_type == MSI_X) {
- if (sp->device_type == XFRAME_II_DEVICE) {
- for (i=1; (sp->s2io_entries[i].in_use ==
- MSIX_REGISTERED_SUCCESS); i++) {
- int vector = sp->entries[i].vector;
- void *arg = sp->s2io_entries[i].arg;
+ s2io_card_down(sp, 1);
- free_irq(vector, arg);
- }
- pci_read_config_word(sp->pdev, 0x42, &msi_control);
- msi_control &= 0xFFFE; /* Disable MSI */
- pci_write_config_word(sp->pdev, 0x42, msi_control);
-
- pci_disable_msix(sp->pdev);
- }
- }
- else {
- free_irq(sp->pdev->irq, dev);
- if (sp->intr_type == MSI)
- pci_disable_msi(sp->pdev);
- }
sp->device_close_flag = TRUE; /* Device is shut down. */
return 0;
}
@@ -3545,7 +3895,8 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
/* Avoid "put" pointer going beyond "get" pointer */
- if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
+ if (txdp->Host_Control ||
+ ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
netif_stop_queue(dev);
dev_kfree_skb(skb);
@@ -3655,11 +4006,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
mmiowb();
put_off++;
- put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
+ if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
+ put_off = 0;
mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
/* Avoid "put" pointer going beyond "get" pointer */
- if (((put_off + 1) % queue_len) == get_off) {
+ if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
+ sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
DBG_PRINT(TX_DBG,
"No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
put_off, get_off);
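The put/get pointer checks above (and the matching change in tx_intr_handler) replace the "% queue_len" wrap with a compare-and-reset, avoiding an integer division in the transmit hot path. Both forms advance a circular index identically; a minimal sketch:

	/* Advance a circular ring index without a modulo operation. */
	static inline int ring_index_next(int idx, int ring_len)
	{
		return (idx + 1 == ring_len) ? 0 : idx + 1;
	}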
@@ -3795,7 +4148,6 @@ s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
atomic_dec(&sp->isr_cnt);
return IRQ_HANDLED;
}
-
static void s2io_txpic_intr_handle(nic_t *sp)
{
XENA_dev_config_t __iomem *bar0 = sp->bar0;
@@ -3806,41 +4158,56 @@ static void s2io_txpic_intr_handle(nic_t *sp)
val64 = readq(&bar0->gpio_int_reg);
if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
(val64 & GPIO_INT_REG_LINK_UP)) {
+ /*
+ * This is an unstable state, so clear both up/down
+ * interrupts and let the adapter re-evaluate the link state.
+ */
val64 |= GPIO_INT_REG_LINK_DOWN;
val64 |= GPIO_INT_REG_LINK_UP;
writeq(val64, &bar0->gpio_int_reg);
- goto masking;
- }
-
- if (((sp->last_link_state == LINK_UP) &&
- (val64 & GPIO_INT_REG_LINK_DOWN)) ||
- ((sp->last_link_state == LINK_DOWN) &&
- (val64 & GPIO_INT_REG_LINK_UP))) {
val64 = readq(&bar0->gpio_int_mask);
- val64 |= GPIO_INT_MASK_LINK_DOWN;
- val64 |= GPIO_INT_MASK_LINK_UP;
+ val64 &= ~(GPIO_INT_MASK_LINK_UP |
+ GPIO_INT_MASK_LINK_DOWN);
writeq(val64, &bar0->gpio_int_mask);
- s2io_set_link((unsigned long)sp);
}
-masking:
- if (sp->last_link_state == LINK_UP) {
- /*enable down interrupt */
- val64 = readq(&bar0->gpio_int_mask);
- /* unmasks link down intr */
- val64 &= ~GPIO_INT_MASK_LINK_DOWN;
- /* masks link up intr */
- val64 |= GPIO_INT_MASK_LINK_UP;
- writeq(val64, &bar0->gpio_int_mask);
- } else {
- /*enable UP Interrupt */
- val64 = readq(&bar0->gpio_int_mask);
- /* unmasks link up interrupt */
- val64 &= ~GPIO_INT_MASK_LINK_UP;
- /* masks link down interrupt */
- val64 |= GPIO_INT_MASK_LINK_DOWN;
- writeq(val64, &bar0->gpio_int_mask);
+ else if (val64 & GPIO_INT_REG_LINK_UP) {
+ val64 = readq(&bar0->adapter_status);
+ if (verify_xena_quiescence(sp, val64,
+ sp->device_enabled_once)) {
+ /* Enable Adapter */
+ val64 = readq(&bar0->adapter_control);
+ val64 |= ADAPTER_CNTL_EN;
+ writeq(val64, &bar0->adapter_control);
+ val64 |= ADAPTER_LED_ON;
+ writeq(val64, &bar0->adapter_control);
+ if (!sp->device_enabled_once)
+ sp->device_enabled_once = 1;
+
+ s2io_link(sp, LINK_UP);
+ /*
+ * unmask link down interrupt and mask link-up
+ * intr
+ */
+ val64 = readq(&bar0->gpio_int_mask);
+ val64 &= ~GPIO_INT_MASK_LINK_DOWN;
+ val64 |= GPIO_INT_MASK_LINK_UP;
+ writeq(val64, &bar0->gpio_int_mask);
+
+ }
+ } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
+ val64 = readq(&bar0->adapter_status);
+ if (verify_xena_quiescence(sp, val64,
+ sp->device_enabled_once)) {
+ s2io_link(sp, LINK_DOWN);
+ /* Link is down so unmask link up interrupt */
+ val64 = readq(&bar0->gpio_int_mask);
+ val64 &= ~GPIO_INT_MASK_LINK_UP;
+ val64 |= GPIO_INT_MASK_LINK_DOWN;
+ writeq(val64, &bar0->gpio_int_mask);
+ }
}
}
+ val64 = readq(&bar0->gpio_int_mask);
}
/**
@@ -3863,7 +4230,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
nic_t *sp = dev->priv;
XENA_dev_config_t __iomem *bar0 = sp->bar0;
int i;
- u64 reason = 0, val64;
+ u64 reason = 0, val64, org_mask;
mac_info_t *mac_control;
struct config_param *config;
@@ -3887,43 +4254,41 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
return IRQ_NONE;
}
+ val64 = 0xFFFFFFFFFFFFFFFFULL;
+ /* Store current mask before masking all interrupts */
+ org_mask = readq(&bar0->general_int_mask);
+ writeq(val64, &bar0->general_int_mask);
+
#ifdef CONFIG_S2IO_NAPI
if (reason & GEN_INTR_RXTRAFFIC) {
if (netif_rx_schedule_prep(dev)) {
- en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
- DISABLE_INTRS);
+ writeq(val64, &bar0->rx_traffic_mask);
__netif_rx_schedule(dev);
}
}
#else
- /* If Intr is because of Rx Traffic */
- if (reason & GEN_INTR_RXTRAFFIC) {
- /*
- * rx_traffic_int reg is an R1 register, writing all 1's
- * will ensure that the actual interrupt causing bit get's
- * cleared and hence a read can be avoided.
- */
- val64 = 0xFFFFFFFFFFFFFFFFULL;
- writeq(val64, &bar0->rx_traffic_int);
- for (i = 0; i < config->rx_ring_num; i++) {
- rx_intr_handler(&mac_control->rings[i]);
- }
+ /*
+ * Rx handler is called by default, without checking for the
+ * cause of interrupt.
+ * rx_traffic_int reg is an R1 register, writing all 1's
+ * will ensure that the actual interrupt causing bit get's
+ * cleared and hence a read can be avoided.
+ */
+ writeq(val64, &bar0->rx_traffic_int);
+ for (i = 0; i < config->rx_ring_num; i++) {
+ rx_intr_handler(&mac_control->rings[i]);
}
#endif
- /* If Intr is because of Tx Traffic */
- if (reason & GEN_INTR_TXTRAFFIC) {
- /*
- * tx_traffic_int reg is an R1 register, writing all 1's
- * will ensure that the actual interrupt causing bit get's
- * cleared and hence a read can be avoided.
- */
- val64 = 0xFFFFFFFFFFFFFFFFULL;
- writeq(val64, &bar0->tx_traffic_int);
+ /*
+ * tx_traffic_int reg is an R1 register, writing all 1's
+ * will ensure that the actual interrupt causing bit get's
+ * cleared and hence a read can be avoided.
+ */
+ writeq(val64, &bar0->tx_traffic_int);
- for (i = 0; i < config->tx_fifo_num; i++)
- tx_intr_handler(&mac_control->fifos[i]);
- }
+ for (i = 0; i < config->tx_fifo_num; i++)
+ tx_intr_handler(&mac_control->fifos[i]);
if (reason & GEN_INTR_TXPIC)
s2io_txpic_intr_handle(sp);
@@ -3949,6 +4314,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
DBG_PRINT(ERR_DBG, " in ISR!!\n");
clear_bit(0, (&sp->tasklet_status));
atomic_dec(&sp->isr_cnt);
+ writeq(org_mask, &bar0->general_int_mask);
return IRQ_HANDLED;
}
clear_bit(0, (&sp->tasklet_status));
@@ -3964,7 +4330,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
}
}
#endif
-
+ writeq(org_mask, &bar0->general_int_mask);
atomic_dec(&sp->isr_cnt);
return IRQ_HANDLED;
}
@@ -4067,7 +4433,8 @@ static void s2io_set_multicast(struct net_device *dev)
RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait till command completes */
- wait_for_cmd_complete(sp);
+ wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
sp->m_cast_flg = 1;
sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
@@ -4082,7 +4449,8 @@ static void s2io_set_multicast(struct net_device *dev)
RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait till command completes */
- wait_for_cmd_complete(sp);
+ wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
sp->m_cast_flg = 0;
sp->all_multi_pos = 0;
@@ -4147,7 +4515,8 @@ static void s2io_set_multicast(struct net_device *dev)
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait for command completes */
- if (wait_for_cmd_complete(sp)) {
+ if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
DBG_PRINT(ERR_DBG, "%s: Adding ",
dev->name);
DBG_PRINT(ERR_DBG, "Multicasts failed\n");
@@ -4177,7 +4546,8 @@ static void s2io_set_multicast(struct net_device *dev)
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait for command completes */
- if (wait_for_cmd_complete(sp)) {
+ if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
DBG_PRINT(ERR_DBG, "%s: Adding ",
dev->name);
DBG_PRINT(ERR_DBG, "Multicasts failed\n");
@@ -4222,7 +4592,8 @@ static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
RMAC_ADDR_CMD_MEM_OFFSET(0);
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait till command completes */
- if (wait_for_cmd_complete(sp)) {
+ if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
return FAILURE;
}
@@ -4619,6 +4990,44 @@ static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
}
return ret;
}
+static void s2io_vpd_read(nic_t *nic)
+{
+ u8 vpd_data[256],data;
+ int i=0, cnt, fail = 0;
+ int vpd_addr = 0x80;
+
+ if (nic->device_type == XFRAME_II_DEVICE) {
+ strcpy(nic->product_name, "Xframe II 10GbE network adapter");
+ vpd_addr = 0x80;
+ }
+ else {
+ strcpy(nic->product_name, "Xframe I 10GbE network adapter");
+ vpd_addr = 0x50;
+ }
+
+ for (i = 0; i < 256; i +=4 ) {
+ pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
+ pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
+ pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
+ for (cnt = 0; cnt <5; cnt++) {
+ msleep(2);
+ pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
+ if (data == 0x80)
+ break;
+ }
+ if (cnt >= 5) {
+ DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
+ fail = 1;
+ break;
+ }
+ pci_read_config_dword(nic->pdev, (vpd_addr + 4),
+ (u32 *)&vpd_data[i]);
+ }
+ if ((!fail) && (vpd_data[1] < VPD_PRODUCT_NAME_LEN)) {
+ memset(nic->product_name, 0, vpd_data[1]);
+ memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
+ }
+}
/**
* s2io_ethtool_geeprom - reads the value stored in the Eeprom.
@@ -4931,8 +5340,10 @@ static int s2io_link_test(nic_t * sp, uint64_t * data)
u64 val64;
val64 = readq(&bar0->adapter_status);
- if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
+ if(!(LINK_IS_UP(val64)))
*data = 1;
+ else
+ *data = 0;
return 0;
}
@@ -5112,7 +5523,6 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
int i = 0;
nic_t *sp = dev->priv;
StatInfo_t *stat_info = sp->mac_control.stats_info;
- u64 tmp;
s2io_updt_stats(sp);
tmp_stats[i++] =
@@ -5129,9 +5539,19 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
(u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
le32_to_cpu(stat_info->tmac_bcst_frms);
tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
+ le32_to_cpu(stat_info->tmac_ttl_octets);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
+ le32_to_cpu(stat_info->tmac_ucst_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
+ le32_to_cpu(stat_info->tmac_nucst_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
le32_to_cpu(stat_info->tmac_any_err_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
tmp_stats[i++] =
(u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
@@ -5163,11 +5583,27 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
(u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
le32_to_cpu(stat_info->rmac_vld_bcst_frms);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_ttl_octets);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
+ << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
+ << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
le32_to_cpu(stat_info->rmac_discarded_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
+ << 32 | le32_to_cpu(stat_info->rmac_drop_events);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
le32_to_cpu(stat_info->rmac_usized_frms);
@@ -5180,40 +5616,129 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
tmp_stats[i++] =
(u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
le32_to_cpu(stat_info->rmac_jabber_frms);
- tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
le32_to_cpu(stat_info->rmac_ip);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
- tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
le32_to_cpu(stat_info->rmac_drop_ip);
- tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
le32_to_cpu(stat_info->rmac_icmp);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
- tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
le32_to_cpu(stat_info->rmac_udp);
tmp_stats[i++] =
(u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
le32_to_cpu(stat_info->rmac_err_drp_udp);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
+ tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
+ tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
+ tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
+ tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
+ tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
+ tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
+ tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
+ tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
tmp_stats[i++] =
(u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
le32_to_cpu(stat_info->rmac_pause_cnt);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
tmp_stats[i++] =
(u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
le32_to_cpu(stat_info->rmac_accepted_ip);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
tmp_stats[i++] = 0;
tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
+ tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
+ tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
+ tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
+ tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
+ tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
+ tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
+ tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
+ tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
+ tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
+ tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
+ tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
+ tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
+ tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
+ tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
+ tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
+ tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
+ tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
tmp_stats[i++] = stat_info->sw_stat.sending_both;
tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
- tmp = 0;
if (stat_info->sw_stat.num_aggregations) {
- tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
- do_div(tmp, stat_info->sw_stat.num_aggregations);
+ u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
+ int count = 0;
+ /*
+ * Since 64-bit divide does not work on all platforms,
+ * do repeated subtraction.
+ */
+ while (tmp >= stat_info->sw_stat.num_aggregations) {
+ tmp -= stat_info->sw_stat.num_aggregations;
+ count++;
+ }
+ tmp_stats[i++] = count;
}
- tmp_stats[i++] = tmp;
+ else
+ tmp_stats[i++] = 0;
}
static int s2io_ethtool_get_regs_len(struct net_device *dev)
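The hunk above drops the do_div() call in favour of repeated subtraction, per its comment about 64-bit division support. For reference only, and not what the patch does, the kernel's do_div() from <asm/div64.h> divides a u64 in place by a 32-bit divisor and returns the remainder; the parameter types in this sketch are assumptions, not the driver's own:

	#include <asm/div64.h>

	static u64 avg_pkts_aggregated(u64 sum_avg_pkts, u32 num_aggregations)
	{
		if (!num_aggregations)
			return 0;
		do_div(sum_avg_pkts, num_aggregations);	/* quotient is left in sum_avg_pkts */
		return sum_avg_pkts;
	}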
@@ -5351,7 +5876,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
if (netif_running(dev)) {
- s2io_card_down(sp);
+ s2io_card_down(sp, 0);
netif_stop_queue(dev);
if (s2io_card_up(sp)) {
DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
@@ -5489,12 +6014,172 @@ static void s2io_set_link(unsigned long data)
clear_bit(0, &(nic->link_state));
}
-static void s2io_card_down(nic_t * sp)
+static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
+ struct sk_buff **skb, u64 *temp0, u64 *temp1,
+ u64 *temp2, int size)
+{
+ struct net_device *dev = sp->dev;
+ struct sk_buff *frag_list;
+
+ if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
+ /* allocate skb */
+ if (*skb) {
+ DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
+ /*
+ * As Rx frame are not going to be processed,
+ * using same mapped address for the Rxd
+ * buffer pointer
+ */
+ ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0;
+ } else {
+ *skb = dev_alloc_skb(size);
+ if (!(*skb)) {
+ DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
+ DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
+ return -ENOMEM ;
+ }
+ /* storing the mapped addr in a temp variable
+ * such that it can be used for the next rxd whose
+ * Host Control is NULL
+ */
+ ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0 =
+ pci_map_single( sp->pdev, (*skb)->data,
+ size - NET_IP_ALIGN,
+ PCI_DMA_FROMDEVICE);
+ rxdp->Host_Control = (unsigned long) (*skb);
+ }
+ } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
+ /* Two buffer Mode */
+ if (*skb) {
+ ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
+ ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
+ ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
+ } else {
+ *skb = dev_alloc_skb(size);
+ ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
+ pci_map_single(sp->pdev, (*skb)->data,
+ dev->mtu + 4,
+ PCI_DMA_FROMDEVICE);
+ ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
+ pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
+ PCI_DMA_FROMDEVICE);
+ rxdp->Host_Control = (unsigned long) (*skb);
+
+ /* Buffer-1 will be a dummy buffer, not used */
+ ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
+ pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
+ PCI_DMA_FROMDEVICE);
+ }
+ } else if ((rxdp->Host_Control == 0)) {
+ /* Three buffer mode */
+ if (*skb) {
+ ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
+ ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
+ ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
+ } else {
+ *skb = dev_alloc_skb(size);
+
+ ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
+ pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
+ PCI_DMA_FROMDEVICE);
+ /* Buffer-1 receives L3/L4 headers */
+ ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
+ pci_map_single( sp->pdev, (*skb)->data,
+ l3l4hdr_size + 4,
+ PCI_DMA_FROMDEVICE);
+ /*
+ * skb_shinfo(skb)->frag_list will have L4
+ * data payload
+ */
+ skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
+ ALIGN_SIZE);
+ if (skb_shinfo(*skb)->frag_list == NULL) {
+ DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \
+ failed\n ", dev->name);
+ return -ENOMEM ;
+ }
+ frag_list = skb_shinfo(*skb)->frag_list;
+ frag_list->next = NULL;
+ /*
+ * Buffer-2 receives L4 data payload
+ */
+ ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
+ pci_map_single( sp->pdev, frag_list->data,
+ dev->mtu, PCI_DMA_FROMDEVICE);
+ }
+ }
+ return 0;
+}
+static void set_rxd_buffer_size(nic_t *sp, RxD_t *rxdp, int size)
+{
+ struct net_device *dev = sp->dev;
+ if (sp->rxd_mode == RXD_MODE_1) {
+ rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
+ } else if (sp->rxd_mode == RXD_MODE_3B) {
+ rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
+ rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
+ rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
+ } else {
+ rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
+ rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
+ rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
+ }
+}
+
+static int rxd_owner_bit_reset(nic_t *sp)
+{
+ int i, j, k, blk_cnt = 0, size;
+ mac_info_t * mac_control = &sp->mac_control;
+ struct config_param *config = &sp->config;
+ struct net_device *dev = sp->dev;
+ RxD_t *rxdp = NULL;
+ struct sk_buff *skb = NULL;
+ buffAdd_t *ba = NULL;
+ u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
+
+ /* Calculate the size based on ring mode */
+ size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
+ HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
+ if (sp->rxd_mode == RXD_MODE_1)
+ size += NET_IP_ALIGN;
+ else if (sp->rxd_mode == RXD_MODE_3B)
+ size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
+ else
+ size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ blk_cnt = config->rx_cfg[i].num_rxd /
+ (rxd_count[sp->rxd_mode] +1);
+
+ for (j = 0; j < blk_cnt; j++) {
+ for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
+ rxdp = mac_control->rings[i].
+ rx_blocks[j].rxds[k].virt_addr;
+ if(sp->rxd_mode >= RXD_MODE_3A)
+ ba = &mac_control->rings[i].ba[j][k];
+ set_rxd_buffer_pointer(sp, rxdp, ba,
+ &skb,(u64 *)&temp0_64,
+ (u64 *)&temp1_64,
+ (u64 *)&temp2_64, size);
+
+ set_rxd_buffer_size(sp, rxdp, size);
+ wmb();
+ /* flip the Ownership bit to Hardware */
+ rxdp->Control_1 |= RXD_OWN_XENA;
+ }
+ }
+ }
+ return 0;
+
+}
+
+static void s2io_card_down(nic_t * sp, int flag)
{
int cnt = 0;
XENA_dev_config_t __iomem *bar0 = sp->bar0;
unsigned long flags;
register u64 val64 = 0;
+ struct net_device *dev = sp->dev;
del_timer_sync(&sp->alarm_timer);
/* If s2io_set_link task is executing, wait till it completes. */
@@ -5505,12 +6190,51 @@ static void s2io_card_down(nic_t * sp)
/* disable Tx and Rx traffic on the NIC */
stop_nic(sp);
+ if (flag) {
+ if (sp->intr_type == MSI_X) {
+ int i;
+ u16 msi_control;
+
+ for (i=1; (sp->s2io_entries[i].in_use ==
+ MSIX_REGISTERED_SUCCESS); i++) {
+ int vector = sp->entries[i].vector;
+ void *arg = sp->s2io_entries[i].arg;
+
+ free_irq(vector, arg);
+ }
+ pci_read_config_word(sp->pdev, 0x42, &msi_control);
+ msi_control &= 0xFFFE; /* Disable MSI */
+ pci_write_config_word(sp->pdev, 0x42, msi_control);
+ pci_disable_msix(sp->pdev);
+ } else {
+ free_irq(sp->pdev->irq, dev);
+ if (sp->intr_type == MSI)
+ pci_disable_msi(sp->pdev);
+ }
+ }
+ /* Waiting till all Interrupt handlers are complete */
+ cnt = 0;
+ do {
+ msleep(10);
+ if (!atomic_read(&sp->isr_cnt))
+ break;
+ cnt++;
+ } while(cnt < 5);
/* Kill tasklet. */
tasklet_kill(&sp->task);
/* Check if the device is Quiescent and then Reset the NIC */
do {
+ /* As per the HW requirement we need to replenish the
+ * receive buffers to avoid a ring bump. Since there is
+ * no intention of processing the Rx frame at this point, we
+ * just set the ownership bit of the rxd in each Rx
+ * ring to HW and set the appropriate buffer size
+ * based on the ring mode.
+ */
+ rxd_owner_bit_reset(sp);
+
val64 = readq(&bar0->adapter_status);
if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
break;
@@ -5528,15 +6252,6 @@ static void s2io_card_down(nic_t * sp)
} while (1);
s2io_reset(sp);
- /* Waiting till all Interrupt handlers are complete */
- cnt = 0;
- do {
- msleep(10);
- if (!atomic_read(&sp->isr_cnt))
- break;
- cnt++;
- } while(cnt < 5);
-
spin_lock_irqsave(&sp->tx_lock, flags);
/* Free all Tx buffers */
free_tx_buffers(sp);
@@ -5637,7 +6352,7 @@ static void s2io_restart_nic(unsigned long data)
struct net_device *dev = (struct net_device *) data;
nic_t *sp = dev->priv;
- s2io_card_down(sp);
+ s2io_card_down(sp, 0);
if (s2io_card_up(sp)) {
DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
dev->name);
@@ -5667,6 +6382,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
if (netif_carrier_ok(dev)) {
schedule_work(&sp->rst_timer_task);
+ sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
}
}
@@ -5695,18 +6411,33 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
((unsigned long) rxdp->Host_Control);
int ring_no = ring_data->ring_no;
u16 l3_csum, l4_csum;
+ unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
lro_t *lro;
skb->dev = dev;
- if (rxdp->Control_1 & RXD_T_CODE) {
- unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
- DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
- dev->name, err);
- dev_kfree_skb(skb);
- sp->stats.rx_crc_errors++;
- atomic_dec(&sp->rx_bufs_left[ring_no]);
- rxdp->Host_Control = 0;
- return 0;
+
+ if (err) {
+ /* Check for parity error */
+ if (err & 0x1) {
+ sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
+ }
+
+ /*
+ * Drop the packet if the transfer code is bad. The exception is
+ * 0x5, which could be due to an unsupported IPv6 extension header.
+ * In that case, we let the stack handle the packet.
+ * Note that since the checksum will be incorrect,
+ * the stack will validate it.
+ */
+ if (err && ((err >> 48) != 0x5)) {
+ DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
+ dev->name, err);
+ sp->stats.rx_crc_errors++;
+ dev_kfree_skb(skb);
+ atomic_dec(&sp->rx_bufs_left[ring_no]);
+ rxdp->Host_Control = 0;
+ return 0;
+ }
}
/* Updating statistics */
@@ -5792,6 +6523,9 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
clear_lro_session(lro);
goto send_up;
case 0: /* sessions exceeded */
+ case -1: /* non-TCP or not
+ * L2 aggregatable
+ */
case 5: /*
* First pkt in session not
* L3/L4 aggregatable
@@ -5918,13 +6652,6 @@ static void s2io_init_pci(nic_t * sp)
pci_write_config_word(sp->pdev, PCI_COMMAND,
(pci_cmd | PCI_COMMAND_PARITY));
pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
-
- /* Forcibly disabling relaxed ordering capability of the card. */
- pcix_cmd &= 0xfffd;
- pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- pcix_cmd);
- pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- &(pcix_cmd));
}
MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
@@ -5954,6 +6681,55 @@ module_param(intr_type, int, 0);
module_param(lro, int, 0);
module_param(lro_max_pkts, int, 0);
+static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
+{
+ if (tx_fifo_num > 8) {
+ DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
+ "supported\n");
+ DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
+ tx_fifo_num = 8;
+ }
+ if (rx_ring_num > 8) {
+ DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
+ "supported\n");
+ DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
+ rx_ring_num = 8;
+ }
+#ifdef CONFIG_S2IO_NAPI
+ if (*dev_intr_type != INTA) {
+ DBG_PRINT(ERR_DBG, "s2io: NAPI cannot be enabled when "
+ "MSI/MSI-X is enabled. Defaulting to INTA\n");
+ *dev_intr_type = INTA;
+ }
+#endif
+#ifndef CONFIG_PCI_MSI
+ if (*dev_intr_type != INTA) {
+ DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
+ "MSI/MSI-X. Defaulting to INTA\n");
+ *dev_intr_type = INTA;
+ }
+#else
+ if (*dev_intr_type > MSI_X) {
+ DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
+ "Defaulting to INTA\n");
+ *dev_intr_type = INTA;
+ }
+#endif
+ if ((*dev_intr_type == MSI_X) &&
+ ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
+ (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
+ DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
+ "Defaulting to INTA\n");
+ *dev_intr_type = INTA;
+ }
+ if (rx_ring_mode > 3) {
+ DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
+ DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
+ rx_ring_mode = 3;
+ }
+ return SUCCESS;
+}
+
/**
* s2io_init_nic - Initialization of the adapter .
* @pdev : structure containing the PCI related information of the device.
@@ -5984,15 +6760,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
int mode;
u8 dev_intr_type = intr_type;
-#ifdef CONFIG_S2IO_NAPI
- if (dev_intr_type != INTA) {
- DBG_PRINT(ERR_DBG, "NAPI cannot be enabled when MSI/MSI-X \
-is enabled. Defaulting to INTA\n");
- dev_intr_type = INTA;
- }
- else
- DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
-#endif
+ if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
+ return ret;
if ((ret = pci_enable_device(pdev))) {
DBG_PRINT(ERR_DBG,
@@ -6017,14 +6786,6 @@ is enabled. Defaulting to INTA\n");
pci_disable_device(pdev);
return -ENOMEM;
}
-
- if ((dev_intr_type == MSI_X) &&
- ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
- (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
- DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. \
-Defaulting to INTA\n");
- dev_intr_type = INTA;
- }
if (dev_intr_type != MSI_X) {
if (pci_request_regions(pdev, s2io_driver_name)) {
DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
@@ -6100,8 +6861,6 @@ Defaulting to INTA\n");
config = &sp->config;
/* Tx side parameters. */
- if (tx_fifo_len[0] == 0)
- tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
config->tx_fifo_num = tx_fifo_num;
for (i = 0; i < MAX_TX_FIFOS; i++) {
config->tx_cfg[i].fifo_len = tx_fifo_len[i];
@@ -6125,8 +6884,6 @@ Defaulting to INTA\n");
config->max_txds = MAX_SKB_FRAGS + 2;
/* Rx side parameters. */
- if (rx_ring_sz[0] == 0)
- rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
config->rx_ring_num = rx_ring_num;
for (i = 0; i < MAX_RX_RINGS; i++) {
config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
@@ -6267,8 +7024,8 @@ Defaulting to INTA\n");
val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
writeq(val64, &bar0->rmac_addr_cmd_mem);
- wait_for_cmd_complete(sp);
-
+ wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
tmp64 = readq(&bar0->rmac_addr_data0_mem);
mac_down = (u32) tmp64;
mac_up = (u32) (tmp64 >> 32);
@@ -6322,82 +7079,63 @@ Defaulting to INTA\n");
ret = -ENODEV;
goto register_failed;
}
-
- if (sp->device_type & XFRAME_II_DEVICE) {
- DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
- dev->name);
- DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
+ s2io_vpd_read(sp);
+ DBG_PRINT(ERR_DBG, "%s: Neterion %s",dev->name, sp->product_name);
+ DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n",
get_xena_rev_id(sp->pdev),
s2io_driver_version);
- switch(sp->intr_type) {
- case INTA:
- DBG_PRINT(ERR_DBG, ", Intr type INTA");
- break;
- case MSI:
- DBG_PRINT(ERR_DBG, ", Intr type MSI");
- break;
- case MSI_X:
- DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
- break;
- }
-
- DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
- DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
+ DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
sp->def_mac_addr[0].mac_addr[0],
sp->def_mac_addr[0].mac_addr[1],
sp->def_mac_addr[0].mac_addr[2],
sp->def_mac_addr[0].mac_addr[3],
sp->def_mac_addr[0].mac_addr[4],
sp->def_mac_addr[0].mac_addr[5]);
+ if (sp->device_type & XFRAME_II_DEVICE) {
mode = s2io_print_pci_mode(sp);
if (mode < 0) {
- DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
+ DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
ret = -EBADSLT;
+ unregister_netdev(dev);
goto set_swap_failed;
}
- } else {
- DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
- dev->name);
- DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
- get_xena_rev_id(sp->pdev),
- s2io_driver_version);
- switch(sp->intr_type) {
- case INTA:
- DBG_PRINT(ERR_DBG, ", Intr type INTA");
- break;
- case MSI:
- DBG_PRINT(ERR_DBG, ", Intr type MSI");
- break;
- case MSI_X:
- DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
- break;
- }
- DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
- DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
- sp->def_mac_addr[0].mac_addr[0],
- sp->def_mac_addr[0].mac_addr[1],
- sp->def_mac_addr[0].mac_addr[2],
- sp->def_mac_addr[0].mac_addr[3],
- sp->def_mac_addr[0].mac_addr[4],
- sp->def_mac_addr[0].mac_addr[5]);
}
- if (sp->rxd_mode == RXD_MODE_3B)
- DBG_PRINT(ERR_DBG, "%s: 2-Buffer mode support has been "
- "enabled\n",dev->name);
- if (sp->rxd_mode == RXD_MODE_3A)
- DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been "
- "enabled\n",dev->name);
-
+ switch(sp->rxd_mode) {
+ case RXD_MODE_1:
+ DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
+ dev->name);
+ break;
+ case RXD_MODE_3B:
+ DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
+ dev->name);
+ break;
+ case RXD_MODE_3A:
+ DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
+ dev->name);
+ break;
+ }
+#ifdef CONFIG_S2IO_NAPI
+ DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
+#endif
+ switch(sp->intr_type) {
+ case INTA:
+ DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
+ break;
+ case MSI:
+ DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
+ break;
+ case MSI_X:
+ DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
+ break;
+ }
if (sp->lro)
DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
- dev->name);
+ dev->name);
/* Initialize device name */
- strcpy(sp->name, dev->name);
- if (sp->device_type & XFRAME_II_DEVICE)
- strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
- else
- strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
+ sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
/* Initialize bimodal Interrupts */
sp->config.bimodal = bimodal;
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 0a0b5b29d81e..3203732a668d 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -31,6 +31,8 @@
#define SUCCESS 0
#define FAILURE -1
+#define CHECKBIT(value, nbit) ((value) & (1 << (nbit)))
+
/* Maximum time to flicker LED when asked to identify NIC using ethtool */
#define MAX_FLICKER_TIME 60000 /* 60 Secs */
@@ -78,6 +80,11 @@ static int debug_level = ERR_DBG;
typedef struct {
unsigned long long single_ecc_errs;
unsigned long long double_ecc_errs;
+ unsigned long long parity_err_cnt;
+ unsigned long long serious_err_cnt;
+ unsigned long long soft_reset_cnt;
+ unsigned long long fifo_full_cnt;
+ unsigned long long ring_full_cnt;
/* LRO statistics */
unsigned long long clubbed_frms_cnt;
unsigned long long sending_both;
@@ -87,6 +94,25 @@ typedef struct {
unsigned long long num_aggregations;
} swStat_t;
+/* Xpak related alarms and warnings */
+typedef struct {
+ u64 alarm_transceiver_temp_high;
+ u64 alarm_transceiver_temp_low;
+ u64 alarm_laser_bias_current_high;
+ u64 alarm_laser_bias_current_low;
+ u64 alarm_laser_output_power_high;
+ u64 alarm_laser_output_power_low;
+ u64 warn_transceiver_temp_high;
+ u64 warn_transceiver_temp_low;
+ u64 warn_laser_bias_current_high;
+ u64 warn_laser_bias_current_low;
+ u64 warn_laser_output_power_high;
+ u64 warn_laser_output_power_low;
+ u64 xpak_regs_stat;
+ u32 xpak_timer_count;
+} xpakStat_t;
+
+
/* The statistics block of Xena */
typedef struct stat_block {
/* Tx MAC statistics counters. */
@@ -263,7 +289,9 @@ typedef struct stat_block {
u32 rmac_accepted_ip_oflow;
u32 reserved_14;
u32 link_fault_cnt;
+ u8 buffer[20];
swStat_t sw_stat;
+ xpakStat_t xpak_stat;
} StatInfo_t;
/*
@@ -659,7 +687,8 @@ typedef struct {
} usr_addr_t;
/* Default Tunable parameters of the NIC. */
-#define DEFAULT_FIFO_LEN 4096
+#define DEFAULT_FIFO_0_LEN 4096
+#define DEFAULT_FIFO_1_7_LEN 512
#define SMALL_BLK_CNT 30
#define LARGE_BLK_CNT 100
@@ -732,7 +761,7 @@ struct s2io_nic {
int device_close_flag;
int device_enabled_once;
- char name[50];
+ char name[60];
struct tasklet_struct task;
volatile unsigned long tasklet_status;
@@ -803,6 +832,8 @@ struct s2io_nic {
char desc1[35];
char desc2[35];
+ int avail_msix_vectors; /* No. of MSI-X vectors granted by system */
+
struct msix_info_st msix_info[0x3f];
#define XFRAME_I_DEVICE 1
@@ -824,6 +855,8 @@ struct s2io_nic {
spinlock_t rx_lock;
atomic_t isr_cnt;
u64 *ufo_in_band_v;
+#define VPD_PRODUCT_NAME_LEN 50
+ u8 product_name[VPD_PRODUCT_NAME_LEN];
};
#define RESET_ERROR 1;
@@ -848,28 +881,32 @@ static inline void writeq(u64 val, void __iomem *addr)
writel((u32) (val), addr);
writel((u32) (val >> 32), (addr + 4));
}
+#endif
-/* In 32 bit modes, some registers have to be written in a
- * particular order to expect correct hardware operation. The
- * macro SPECIAL_REG_WRITE is used to perform such ordered
- * writes. Defines UF (Upper First) and LF (Lower First) will
- * be used to specify the required write order.
+/*
+ * Some registers have to be written in a particular order for
+ * correct hardware operation. The macro SPECIAL_REG_WRITE
+ * is used to perform such ordered writes. The defines UF (Upper First)
+ * and LF (Lower First) specify the required write order.
*/
#define UF 1
#define LF 2
static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
{
+ u32 ret;
+
if (order == LF) {
writel((u32) (val), addr);
+ ret = readl(addr);
writel((u32) (val >> 32), (addr + 4));
+ ret = readl(addr + 4);
} else {
writel((u32) (val >> 32), (addr + 4));
+ ret = readl(addr + 4);
writel((u32) (val), addr);
+ ret = readl(addr);
}
}
-#else
-#define SPECIAL_REG_WRITE(val, addr, dummy) writeq(val, addr)
-#endif
/* Interrupt related values of Xena */
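A minimal usage sketch of the reworked SPECIAL_REG_WRITE helper above (illustration only, not part of the patch; the function and register pointer below are hypothetical placeholders and assume the UF/LF definitions from this hunk):

static void example_write_64bit_reg(void __iomem *reg)	/* hypothetical caller */
{
	u64 val = 0x0123456789abcdefULL;

	/* Lower 32 bits first, then upper 32 bits; each writel is flushed by a readl. */
	SPECIAL_REG_WRITE(val, reg, LF);

	/* Upper 32 bits first, then lower 32 bits. */
	SPECIAL_REG_WRITE(val, reg, UF);
}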
@@ -965,7 +1002,7 @@ static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
static struct ethtool_ops netdev_ethtool_ops;
static void s2io_set_link(unsigned long data);
static int s2io_set_swapper(nic_t * sp);
-static void s2io_card_down(nic_t *nic);
+static void s2io_card_down(nic_t *nic, int flag);
static int s2io_card_up(nic_t *nic);
static int get_xena_rev_id(struct pci_dev *pdev);
static void restore_xmsi_data(nic_t *nic);
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index f5a3bf4d959a..d05874172209 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1,6 +1,6 @@
/* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux.
Copyright 1999 Silicon Integrated System Corporation
- Revision: 1.08.09 Sep. 19 2005
+ Revision: 1.08.10 Apr. 2 2006
Modified from the driver which is originally written by Donald Becker.
@@ -17,9 +17,10 @@
SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution,
preliminary Rev. 1.0 Jan. 18, 1998
+ Rev 1.08.10 Apr. 2 2006 Daniele Venzano add vlan (jumbo packets) support
Rev 1.08.09 Sep. 19 2005 Daniele Venzano add Wake on LAN support
Rev 1.08.08 Jan. 22 2005 Daniele Venzano use netif_msg for debugging messages
- Rev 1.08.07 Nov. 2 2003 Daniele Venzano <webvenza@libero.it> add suspend/resume support
+ Rev 1.08.07 Nov. 2 2003 Daniele Venzano <venza@brownhat.org> add suspend/resume support
Rev 1.08.06 Sep. 24 2002 Mufasa Yang bug fix for Tx timeout & add SiS963 support
Rev 1.08.05 Jun. 6 2002 Mufasa Yang bug fix for read_eeprom & Tx descriptor over-boundary
Rev 1.08.04 Apr. 25 2002 Mufasa Yang <mufasa@sis.com.tw> added SiS962 support
@@ -77,7 +78,7 @@
#include "sis900.h"
#define SIS900_MODULE_NAME "sis900"
-#define SIS900_DRV_VERSION "v1.08.09 Sep. 19 2005"
+#define SIS900_DRV_VERSION "v1.08.10 Apr. 2 2006"
static char version[] __devinitdata =
KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n";
@@ -1402,6 +1403,11 @@ static void sis900_set_mode (long ioaddr, int speed, int duplex)
rx_flags |= RxATX;
}
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+ /* Can accept Jumbo packet */
+ rx_flags |= RxAJAB;
+#endif
+
outl (tx_flags, ioaddr + txcfg);
outl (rx_flags, ioaddr + rxcfg);
}
@@ -1714,18 +1720,26 @@ static int sis900_rx(struct net_device *net_dev)
while (rx_status & OWN) {
unsigned int rx_size;
+ unsigned int data_size;
if (--rx_work_limit < 0)
break;
- rx_size = (rx_status & DSIZE) - CRC_SIZE;
+ data_size = rx_status & DSIZE;
+ rx_size = data_size - CRC_SIZE;
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+ /* ``TOOLONG'' flag means jumbo packet received. */
+ if ((rx_status & TOOLONG) && data_size <= MAX_FRAME_SIZE)
+ rx_status &= (~ ((unsigned int)TOOLONG));
+#endif
if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) {
/* corrupted packet received */
if (netif_msg_rx_err(sis_priv))
printk(KERN_DEBUG "%s: Corrupted packet "
- "received, buffer status = 0x%8.8x.\n",
- net_dev->name, rx_status);
+ "received, buffer status = 0x%8.8x/%d.\n",
+ net_dev->name, rx_status, data_size);
sis_priv->stats.rx_errors++;
if (rx_status & OVERRUN)
sis_priv->stats.rx_over_errors++;
diff --git a/drivers/net/sis900.h b/drivers/net/sis900.h
index 50323941e3c0..4834e3a15694 100644
--- a/drivers/net/sis900.h
+++ b/drivers/net/sis900.h
@@ -310,8 +310,14 @@ enum sis630_revision_id {
#define CRC_SIZE 4
#define MAC_HEADER_SIZE 14
-#define TX_BUF_SIZE 1536
-#define RX_BUF_SIZE 1536
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define MAX_FRAME_SIZE (1518 + 4)
+#else
+#define MAX_FRAME_SIZE 1518
+#endif /* CONFIG_VLAN_8021Q */
+
+#define TX_BUF_SIZE (MAX_FRAME_SIZE+18)
+#define RX_BUF_SIZE (MAX_FRAME_SIZE+18)
#define NUM_TX_DESC 16 /* Number of Tx descriptor registers. */
#define NUM_RX_DESC 16 /* Number of Rx descriptor registers. */
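A worked tally of the buffer sizes implied by the sis900.h hunk above (illustration only, not part of the patch):

/*
 * with CONFIG_VLAN_8021Q:    MAX_FRAME_SIZE = 1518 + 4 = 1522, TX/RX_BUF_SIZE = 1522 + 18 = 1540
 * without CONFIG_VLAN_8021Q: MAX_FRAME_SIZE = 1518,            TX/RX_BUF_SIZE = 1518 + 18 = 1536
 */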
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 5ca5a1b546a1..536dd1cf7f79 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -44,12 +44,13 @@
#include "skge.h"
#define DRV_NAME "skge"
-#define DRV_VERSION "1.5"
+#define DRV_VERSION "1.6"
#define PFX DRV_NAME " "
#define DEFAULT_TX_RING_SIZE 128
#define DEFAULT_RX_RING_SIZE 512
#define MAX_TX_RING_SIZE 1024
+#define TX_LOW_WATER (MAX_SKB_FRAGS + 1)
#define MAX_RX_RING_SIZE 4096
#define RX_COPY_THRESHOLD 128
#define RX_BUF_SIZE 1536
@@ -401,7 +402,7 @@ static int skge_set_ring_param(struct net_device *dev,
int err;
if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
- p->tx_pending < MAX_SKB_FRAGS+1 || p->tx_pending > MAX_TX_RING_SIZE)
+ p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
return -EINVAL;
skge->rx_ring.count = p->rx_pending;
@@ -603,7 +604,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
struct skge_hw *hw = skge->hw;
int port = skge->port;
- spin_lock_bh(&hw->phy_lock);
+ mutex_lock(&hw->phy_mutex);
if (hw->chip_id == CHIP_ID_GENESIS) {
switch (mode) {
case LED_MODE_OFF:
@@ -663,7 +664,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
PHY_M_LED_MO_RX(MO_LED_ON));
}
}
- spin_unlock_bh(&hw->phy_lock);
+ mutex_unlock(&hw->phy_mutex);
}
/* blink LED's for finding board */
@@ -2038,7 +2039,7 @@ static void skge_phy_reset(struct skge_port *skge)
netif_stop_queue(skge->netdev);
netif_carrier_off(skge->netdev);
- spin_lock_bh(&hw->phy_lock);
+ mutex_lock(&hw->phy_mutex);
if (hw->chip_id == CHIP_ID_GENESIS) {
genesis_reset(hw, port);
genesis_mac_init(hw, port);
@@ -2046,7 +2047,7 @@ static void skge_phy_reset(struct skge_port *skge)
yukon_reset(hw, port);
yukon_init(hw, port);
}
- spin_unlock_bh(&hw->phy_lock);
+ mutex_unlock(&hw->phy_mutex);
}
/* Basic MII support */
@@ -2067,12 +2068,12 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* fallthru */
case SIOCGMIIREG: {
u16 val = 0;
- spin_lock_bh(&hw->phy_lock);
+ mutex_lock(&hw->phy_mutex);
if (hw->chip_id == CHIP_ID_GENESIS)
err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
else
err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
- spin_unlock_bh(&hw->phy_lock);
+ mutex_unlock(&hw->phy_mutex);
data->val_out = val;
break;
}
@@ -2081,14 +2082,14 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- spin_lock_bh(&hw->phy_lock);
+ mutex_lock(&hw->phy_mutex);
if (hw->chip_id == CHIP_ID_GENESIS)
err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
data->val_in);
else
err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
data->val_in);
- spin_unlock_bh(&hw->phy_lock);
+ mutex_unlock(&hw->phy_mutex);
break;
}
return err;
@@ -2191,12 +2192,12 @@ static int skge_up(struct net_device *dev)
goto free_rx_ring;
/* Initialize MAC */
- spin_lock_bh(&hw->phy_lock);
+ mutex_lock(&hw->phy_mutex);
if (hw->chip_id == CHIP_ID_GENESIS)
genesis_mac_init(hw, port);
else
yukon_mac_init(hw, port);
- spin_unlock_bh(&hw->phy_lock);
+ mutex_unlock(&hw->phy_mutex);
/* Configure RAMbuffers */
chunk = hw->ram_size / ((hw->ports + 1)*2);
@@ -2302,21 +2303,20 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
- struct skge_ring *ring = &skge->tx_ring;
struct skge_element *e;
struct skge_tx_desc *td;
int i;
u32 control, len;
u64 map;
+ unsigned long flags;
skb = skb_padto(skb, ETH_ZLEN);
if (!skb)
return NETDEV_TX_OK;
- if (!spin_trylock(&skge->tx_lock)) {
+ if (!spin_trylock_irqsave(&skge->tx_lock, flags))
/* Collision - tell upper layer to requeue */
return NETDEV_TX_LOCKED;
- }
if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
if (!netif_queue_stopped(dev)) {
@@ -2325,12 +2325,13 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
dev->name);
}
- spin_unlock(&skge->tx_lock);
+ spin_unlock_irqrestore(&skge->tx_lock, flags);
return NETDEV_TX_BUSY;
}
- e = ring->to_use;
+ e = skge->tx_ring.to_use;
td = e->desc;
+ BUG_ON(td->control & BMU_OWN);
e->skb = skb;
len = skb_headlen(skb);
map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
@@ -2371,8 +2372,10 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
frag->size, PCI_DMA_TODEVICE);
e = e->next;
- e->skb = NULL;
+ e->skb = skb;
tf = e->desc;
+ BUG_ON(tf->control & BMU_OWN);
+
tf->dma_lo = map;
tf->dma_hi = (u64) map >> 32;
pci_unmap_addr_set(e, mapaddr, map);
@@ -2389,56 +2392,68 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
- if (netif_msg_tx_queued(skge))
+ if (unlikely(netif_msg_tx_queued(skge)))
printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n",
- dev->name, e - ring->start, skb->len);
+ dev->name, e - skge->tx_ring.start, skb->len);
- ring->to_use = e->next;
- if (skge_avail(&skge->tx_ring) <= MAX_SKB_FRAGS + 1) {
+ skge->tx_ring.to_use = e->next;
+ if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
pr_debug("%s: transmit queue full\n", dev->name);
netif_stop_queue(dev);
}
- mmiowb();
- spin_unlock(&skge->tx_lock);
+ spin_unlock_irqrestore(&skge->tx_lock, flags);
dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
-static void skge_tx_complete(struct skge_port *skge, struct skge_element *last)
+
+/* Free resources associated with this ring element */
+static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
+ u32 control)
{
struct pci_dev *pdev = skge->hw->pdev;
- struct skge_element *e;
- for (e = skge->tx_ring.to_clean; e != last; e = e->next) {
- struct sk_buff *skb = e->skb;
- int i;
+ BUG_ON(!e->skb);
- e->skb = NULL;
+ /* skb header vs. fragment */
+ if (control & BMU_STF)
pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ pci_unmap_len(e, maplen),
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
+ pci_unmap_len(e, maplen),
+ PCI_DMA_TODEVICE);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- e = e->next;
- pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
- skb_shinfo(skb)->frags[i].size,
- PCI_DMA_TODEVICE);
- }
+ if (control & BMU_EOF) {
+ if (unlikely(netif_msg_tx_done(skge)))
+ printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
+ skge->netdev->name, e - skge->tx_ring.start);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(e->skb);
}
- skge->tx_ring.to_clean = e;
+ e->skb = NULL;
}
+/* Free all buffers in transmit ring */
static void skge_tx_clean(struct skge_port *skge)
{
+ struct skge_element *e;
+ unsigned long flags;
+
+ spin_lock_irqsave(&skge->tx_lock, flags);
+ for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
+ struct skge_tx_desc *td = e->desc;
+ skge_tx_free(skge, e, td->control);
+ td->control = 0;
+ }
- spin_lock_bh(&skge->tx_lock);
- skge_tx_complete(skge, skge->tx_ring.to_use);
+ skge->tx_ring.to_clean = e;
netif_wake_queue(skge->netdev);
- spin_unlock_bh(&skge->tx_lock);
+ spin_unlock_irqrestore(&skge->tx_lock, flags);
}
static void skge_tx_timeout(struct net_device *dev)
@@ -2664,32 +2679,28 @@ resubmit:
return NULL;
}
-static void skge_tx_done(struct skge_port *skge)
+/* Free all buffers in Tx ring which are no longer owned by device */
+static void skge_txirq(struct net_device *dev)
{
+ struct skge_port *skge = netdev_priv(dev);
struct skge_ring *ring = &skge->tx_ring;
- struct skge_element *e, *last;
+ struct skge_element *e;
+
+ rmb();
spin_lock(&skge->tx_lock);
- last = ring->to_clean;
for (e = ring->to_clean; e != ring->to_use; e = e->next) {
struct skge_tx_desc *td = e->desc;
if (td->control & BMU_OWN)
break;
- if (td->control & BMU_EOF) {
- last = e->next;
- if (unlikely(netif_msg_tx_done(skge)))
- printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
- skge->netdev->name, e - ring->start);
- }
+ skge_tx_free(skge, e, td->control);
}
+ skge->tx_ring.to_clean = e;
- skge_tx_complete(skge, last);
-
- skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
-
- if (skge_avail(&skge->tx_ring) > MAX_SKB_FRAGS + 1)
+ if (netif_queue_stopped(skge->netdev)
+ && skge_avail(&skge->tx_ring) > TX_LOW_WATER)
netif_wake_queue(skge->netdev);
spin_unlock(&skge->tx_lock);
@@ -2704,8 +2715,6 @@ static int skge_poll(struct net_device *dev, int *budget)
int to_do = min(dev->quota, *budget);
int work_done = 0;
- skge_tx_done(skge);
-
for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
struct skge_rx_desc *rd = e->desc;
struct sk_buff *skb;
@@ -2737,10 +2746,12 @@ static int skge_poll(struct net_device *dev, int *budget)
return 1; /* not done */
netif_rx_complete(dev);
- mmiowb();
- hw->intr_mask |= skge->port == 0 ? (IS_R1_F|IS_XA1_F) : (IS_R2_F|IS_XA2_F);
+ spin_lock_irq(&hw->hw_lock);
+ hw->intr_mask |= rxirqmask[skge->port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ mmiowb();
+ spin_unlock_irq(&hw->hw_lock);
return 0;
}
@@ -2847,16 +2858,16 @@ static void skge_error_irq(struct skge_hw *hw)
}
/*
- * Interrupt from PHY are handled in tasklet (soft irq)
+ * Interrupts from the PHY are handled in a work queue
* because accessing phy registers requires spin wait which might
* cause excess interrupt latency.
*/
-static void skge_extirq(unsigned long data)
+static void skge_extirq(void *arg)
{
- struct skge_hw *hw = (struct skge_hw *) data;
+ struct skge_hw *hw = arg;
int port;
- spin_lock(&hw->phy_lock);
+ mutex_lock(&hw->phy_mutex);
for (port = 0; port < hw->ports; port++) {
struct net_device *dev = hw->dev[port];
struct skge_port *skge = netdev_priv(dev);
@@ -2868,10 +2879,12 @@ static void skge_extirq(unsigned long data)
bcom_phy_intr(skge);
}
}
- spin_unlock(&hw->phy_lock);
+ mutex_unlock(&hw->phy_mutex);
+ spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= IS_EXT_REG;
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock_irq(&hw->hw_lock);
}
static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2884,54 +2897,68 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
if (status == 0)
return IRQ_NONE;
+ spin_lock(&hw->hw_lock);
+ status &= hw->intr_mask;
if (status & IS_EXT_REG) {
hw->intr_mask &= ~IS_EXT_REG;
- tasklet_schedule(&hw->ext_tasklet);
+ schedule_work(&hw->phy_work);
}
- if (status & (IS_R1_F|IS_XA1_F)) {
- skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
- hw->intr_mask &= ~(IS_R1_F|IS_XA1_F);
- netif_rx_schedule(hw->dev[0]);
+ if (status & IS_XA1_F) {
+ skge_write8(hw, Q_ADDR(Q_XA1, Q_CSR), CSR_IRQ_CL_F);
+ skge_txirq(hw->dev[0]);
}
- if (status & (IS_R2_F|IS_XA2_F)) {
- skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
- hw->intr_mask &= ~(IS_R2_F|IS_XA2_F);
- netif_rx_schedule(hw->dev[1]);
+ if (status & IS_R1_F) {
+ skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
+ hw->intr_mask &= ~IS_R1_F;
+ netif_rx_schedule(hw->dev[0]);
}
- if (likely((status & hw->intr_mask) == 0))
- return IRQ_HANDLED;
+ if (status & IS_PA_TO_TX1)
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
if (status & IS_PA_TO_RX1) {
struct skge_port *skge = netdev_priv(hw->dev[0]);
- ++skge->net_stats.rx_over_errors;
- skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
- }
- if (status & IS_PA_TO_RX2) {
- struct skge_port *skge = netdev_priv(hw->dev[1]);
++skge->net_stats.rx_over_errors;
- skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
}
- if (status & IS_PA_TO_TX1)
- skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
-
- if (status & IS_PA_TO_TX2)
- skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
if (status & IS_MAC1)
skge_mac_intr(hw, 0);
- if (status & IS_MAC2)
- skge_mac_intr(hw, 1);
+ if (hw->dev[1]) {
+ if (status & IS_XA2_F) {
+ skge_write8(hw, Q_ADDR(Q_XA2, Q_CSR), CSR_IRQ_CL_F);
+ skge_txirq(hw->dev[1]);
+ }
+
+ if (status & IS_R2_F) {
+ skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
+ hw->intr_mask &= ~IS_R2_F;
+ netif_rx_schedule(hw->dev[1]);
+ }
+
+ if (status & IS_PA_TO_RX2) {
+ struct skge_port *skge = netdev_priv(hw->dev[1]);
+ ++skge->net_stats.rx_over_errors;
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
+ }
+
+ if (status & IS_PA_TO_TX2)
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
+
+ if (status & IS_MAC2)
+ skge_mac_intr(hw, 1);
+ }
if (status & IS_HW_ERR)
skge_error_irq(hw);
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock(&hw->hw_lock);
return IRQ_HANDLED;
}
@@ -2957,7 +2984,7 @@ static int skge_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- spin_lock_bh(&hw->phy_lock);
+ mutex_lock(&hw->phy_mutex);
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
memcpy_toio(hw->regs + B2_MAC_1 + port*8,
dev->dev_addr, ETH_ALEN);
@@ -2970,7 +2997,7 @@ static int skge_set_mac_address(struct net_device *dev, void *p)
gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
}
- spin_unlock_bh(&hw->phy_lock);
+ mutex_unlock(&hw->phy_mutex);
return 0;
}
@@ -3082,6 +3109,7 @@ static int skge_reset(struct skge_hw *hw)
else
hw->ram_size = t8 * 4096;
+ spin_lock_init(&hw->hw_lock);
hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
if (hw->ports > 1)
hw->intr_mask |= IS_PORT_2;
@@ -3150,14 +3178,14 @@ static int skge_reset(struct skge_hw *hw)
skge_write32(hw, B0_IMSK, hw->intr_mask);
- spin_lock_bh(&hw->phy_lock);
+ mutex_lock(&hw->phy_mutex);
for (i = 0; i < hw->ports; i++) {
if (hw->chip_id == CHIP_ID_GENESIS)
genesis_reset(hw, i);
else
yukon_reset(hw, i);
}
- spin_unlock_bh(&hw->phy_lock);
+ mutex_unlock(&hw->phy_mutex);
return 0;
}
@@ -3305,8 +3333,8 @@ static int __devinit skge_probe(struct pci_dev *pdev,
}
hw->pdev = pdev;
- spin_lock_init(&hw->phy_lock);
- tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
+ mutex_init(&hw->phy_mutex);
+ INIT_WORK(&hw->phy_work, skge_extirq, hw);
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
if (!hw->regs) {
@@ -3334,6 +3362,14 @@ static int __devinit skge_probe(struct pci_dev *pdev,
if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
goto err_out_led_off;
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk(KERN_ERR PFX "%s: bad (zero?) ethernet address in rom\n",
+ pci_name(pdev));
+ err = -EIO;
+ goto err_out_free_netdev;
+ }
+
+
err = register_netdev(dev);
if (err) {
printk(KERN_ERR PFX "%s: cannot register net device\n",
@@ -3388,11 +3424,15 @@ static void __devexit skge_remove(struct pci_dev *pdev)
dev0 = hw->dev[0];
unregister_netdev(dev0);
+ spin_lock_irq(&hw->hw_lock);
+ hw->intr_mask = 0;
skge_write32(hw, B0_IMSK, 0);
+ spin_unlock_irq(&hw->hw_lock);
+
skge_write16(hw, B0_LED, LED_STAT_OFF);
skge_write8(hw, B0_CTST, CS_RST_SET);
- tasklet_kill(&hw->ext_tasklet);
+ flush_scheduled_work();
free_irq(pdev->irq, hw);
pci_release_regions(pdev);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 1f1ce88c8186..ed19ff47ce11 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2388,6 +2388,7 @@ struct skge_ring {
struct skge_hw {
void __iomem *regs;
struct pci_dev *pdev;
+ spinlock_t hw_lock;
u32 intr_mask;
struct net_device *dev[2];
@@ -2399,9 +2400,8 @@ struct skge_hw {
u32 ram_size;
u32 ram_offset;
u16 phy_addr;
-
- struct tasklet_struct ext_tasklet;
- spinlock_t phy_lock;
+ struct work_struct phy_work;
+ struct mutex phy_mutex;
};
enum {
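The skge hunks above replace the PHY tasklet and phy_lock spinlock with a work queue item and a mutex, so PHY accesses are allowed to sleep. A minimal sketch of that conversion pattern, using the same 2.6-era three-argument INIT_WORK API that the patch uses (all names below are placeholders, not the driver's own):

#include <linux/workqueue.h>
#include <linux/mutex.h>

struct example_hw {			/* placeholder for a driver-private struct */
	struct work_struct phy_work;
	struct mutex phy_mutex;
};

static void example_phy_work(void *arg)	/* old-style work handler: takes void * */
{
	struct example_hw *hw = arg;

	mutex_lock(&hw->phy_mutex);	/* may sleep, unlike the old tasklet + spinlock */
	/* ... slow PHY register accesses go here ... */
	mutex_unlock(&hw->phy_mutex);
}

static void example_setup(struct example_hw *hw)
{
	mutex_init(&hw->phy_mutex);
	INIT_WORK(&hw->phy_work, example_phy_work, hw);	/* three-arg form, as in this patch */
}

/* From the interrupt handler: schedule_work(&hw->phy_work);
 * on teardown: flush_scheduled_work(). */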
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 959109609d85..fba1e4d4d83d 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -187,12 +187,11 @@ static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
return v;
}
-static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
+static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
{
u16 power_control;
u32 reg1;
int vaux;
- int ret = 0;
pr_debug("sky2_set_power_state %d\n", state);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
@@ -275,12 +274,10 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
break;
default:
printk(KERN_ERR PFX "Unknown power state %d\n", state);
- ret = -1;
}
sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
- return ret;
}
static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
@@ -2164,6 +2161,13 @@ static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
/* If idle then force a fake soft NAPI poll once a second
* to work around cases where sharing an edge triggered interrupt.
*/
+static inline void sky2_idle_start(struct sky2_hw *hw)
+{
+ if (idle_timeout > 0)
+ mod_timer(&hw->idle_timer,
+ jiffies + msecs_to_jiffies(idle_timeout));
+}
+
static void sky2_idle(unsigned long arg)
{
struct sky2_hw *hw = (struct sky2_hw *) arg;
@@ -2183,6 +2187,9 @@ static int sky2_poll(struct net_device *dev0, int *budget)
int work_done = 0;
u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
+ if (!~status)
+ goto out;
+
if (status & Y2_IS_HW_ERR)
sky2_hw_intr(hw);
@@ -2219,7 +2226,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
if (sky2_more_work(hw))
return 1;
-
+out:
netif_rx_complete(dev0);
sky2_read32(hw, B0_Y2_SP_LISR);
@@ -2248,8 +2255,10 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
static void sky2_netpoll(struct net_device *dev)
{
struct sky2_port *sky2 = netdev_priv(dev);
+ struct net_device *dev0 = sky2->hw->dev[0];
- sky2_intr(sky2->hw->pdev->irq, sky2->hw, NULL);
+ if (netif_running(dev) && __netif_rx_schedule_prep(dev0))
+ __netif_rx_schedule(dev0);
}
#endif
@@ -3350,9 +3359,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
- if (idle_timeout > 0)
- mod_timer(&hw->idle_timer,
- jiffies + msecs_to_jiffies(idle_timeout));
+ sky2_idle_start(hw);
pci_set_drvdata(pdev, hw);
@@ -3425,8 +3432,14 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct sky2_hw *hw = pci_get_drvdata(pdev);
int i;
+ pci_power_t pstate = pci_choose_state(pdev, state);
+
+ if (!(pstate == PCI_D3hot || pstate == PCI_D3cold))
+ return -EINVAL;
+
+ del_timer_sync(&hw->idle_timer);
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
if (dev) {
@@ -3435,10 +3448,14 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
sky2_down(dev);
netif_device_detach(dev);
+ netif_poll_disable(dev);
}
}
- return sky2_set_power_state(hw, pci_choose_state(pdev, state));
+ sky2_write32(hw, B0_IMSK, 0);
+ pci_save_state(pdev);
+ sky2_set_power_state(hw, pstate);
+ return 0;
}
static int sky2_resume(struct pci_dev *pdev)
@@ -3448,27 +3465,31 @@ static int sky2_resume(struct pci_dev *pdev)
pci_restore_state(pdev);
pci_enable_wake(pdev, PCI_D0, 0);
- err = sky2_set_power_state(hw, PCI_D0);
- if (err)
- goto out;
+ sky2_set_power_state(hw, PCI_D0);
err = sky2_reset(hw);
if (err)
goto out;
- for (i = 0; i < 2; i++) {
+ sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
+
+ for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
if (dev && netif_running(dev)) {
netif_device_attach(dev);
+ netif_poll_enable(dev);
+
err = sky2_up(dev);
if (err) {
printk(KERN_ERR PFX "%s: could not up: %d\n",
dev->name, err);
dev_close(dev);
- break;
+ goto out;
}
}
}
+
+ sky2_idle_start(hw);
out:
return err;
}
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index 3db30cd0625e..5b4e8529d4ab 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -553,7 +553,7 @@ MODULE_LICENSE("GPL");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
-int
+int __init
init_module(void)
{
struct net_device *dev;
diff --git a/drivers/net/smc-ultra32.c b/drivers/net/smc-ultra32.c
index b3e397d7ca85..ff9bd97746dc 100644
--- a/drivers/net/smc-ultra32.c
+++ b/drivers/net/smc-ultra32.c
@@ -421,7 +421,7 @@ static struct net_device *dev_ultra[MAX_ULTRA32_CARDS];
MODULE_DESCRIPTION("SMC Ultra32 EISA ethernet driver");
MODULE_LICENSE("GPL");
-int init_module(void)
+int __init init_module(void)
{
int this_dev, found = 0;
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
new file mode 100644
index 000000000000..bdd8702ead54
--- /dev/null
+++ b/drivers/net/smc911x.c
@@ -0,0 +1,2307 @@
+/*
+ * smc911x.c
+ * This is a driver for SMSC's LAN911{5,6,7,8} single-chip Ethernet devices.
+ *
+ * Copyright (C) 2005 Sensoria Corp
+ * Derived from the unified SMC91x driver by Nicolas Pitre
+ * and the smsc911x.c reference driver by SMSC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Arguments:
+ * watchdog = TX watchdog timeout
+ * tx_fifo_kb = Size of TX FIFO in KB
+ *
+ * History:
+ * 04/16/05 Dustin McIntire Initial version
+ */
+static const char version[] =
+ "smc911x.c: v1.0 04-16-2005 by Dustin McIntire <dustin@sensoria.com>\n";
+
+/* Debugging options */
+#define ENABLE_SMC_DEBUG_RX 0
+#define ENABLE_SMC_DEBUG_TX 0
+#define ENABLE_SMC_DEBUG_DMA 0
+#define ENABLE_SMC_DEBUG_PKTS 0
+#define ENABLE_SMC_DEBUG_MISC 0
+#define ENABLE_SMC_DEBUG_FUNC 0
+
+#define SMC_DEBUG_RX ((ENABLE_SMC_DEBUG_RX ? 1 : 0) << 0)
+#define SMC_DEBUG_TX ((ENABLE_SMC_DEBUG_TX ? 1 : 0) << 1)
+#define SMC_DEBUG_DMA ((ENABLE_SMC_DEBUG_DMA ? 1 : 0) << 2)
+#define SMC_DEBUG_PKTS ((ENABLE_SMC_DEBUG_PKTS ? 1 : 0) << 3)
+#define SMC_DEBUG_MISC ((ENABLE_SMC_DEBUG_MISC ? 1 : 0) << 4)
+#define SMC_DEBUG_FUNC ((ENABLE_SMC_DEBUG_FUNC ? 1 : 0) << 5)
+
+#ifndef SMC_DEBUG
+#define SMC_DEBUG ( SMC_DEBUG_RX | \
+ SMC_DEBUG_TX | \
+ SMC_DEBUG_DMA | \
+ SMC_DEBUG_PKTS | \
+ SMC_DEBUG_MISC | \
+ SMC_DEBUG_FUNC \
+ )
+#endif
+
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/workqueue.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "smc911x.h"
+
+/*
+ * Transmit timeout, default 5 seconds.
+ */
+static int watchdog = 5000;
+module_param(watchdog, int, 0400);
+MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
+
+static int tx_fifo_kb = 8;
+module_param(tx_fifo_kb, int, 0400);
+MODULE_PARM_DESC(tx_fifo_kb, "transmit FIFO size in KB (1<x<15)(default=8)");
+
+MODULE_LICENSE("GPL");
+
+/*
+ * The internal workings of the driver. If you are changing anything
+ * here with the SMC stuff, you should have the datasheet and know
+ * what you are doing.
+ */
+#define CARDNAME "smc911x"
+
+/*
+ * Use power-down feature of the chip
+ */
+#define POWER_DOWN 1
+
+
+/* store this information for the driver.. */
+struct smc911x_local {
+ /*
+ * If I have to wait until the DMA is finished and ready to reload a
+ * packet, I will store the skbuff here. Then, the DMA will send it
+ * out and free it.
+ */
+ struct sk_buff *pending_tx_skb;
+
+ /*
+ * these are things that the kernel wants me to keep, so users
+ * can find out semi-useless statistics of how well the card is
+ * performing
+ */
+ struct net_device_stats stats;
+
+ /* version/revision of the SMC911x chip */
+ u16 version;
+ u16 revision;
+
+ /* FIFO sizes */
+ int tx_fifo_kb;
+ int tx_fifo_size;
+ int rx_fifo_size;
+ int afc_cfg;
+
+ /* Contains the current active receive/phy mode */
+ int ctl_rfduplx;
+ int ctl_rspeed;
+
+ u32 msg_enable;
+ u32 phy_type;
+ struct mii_if_info mii;
+
+ /* work queue */
+ struct work_struct phy_configure;
+ int work_pending;
+
+ int tx_throttle;
+ spinlock_t lock;
+
+#ifdef SMC_USE_DMA
+ /* DMA needs the physical address of the chip */
+ u_long physaddr;
+ int rxdma;
+ int txdma;
+ int rxdma_active;
+ int txdma_active;
+ struct sk_buff *current_rx_skb;
+ struct sk_buff *current_tx_skb;
+ struct device *dev;
+#endif
+};
+
+#if SMC_DEBUG > 0
+#define DBG(n, args...) \
+ do { \
+ if (SMC_DEBUG & (n)) \
+ printk(args); \
+ } while (0)
+
+#define PRINTK(args...) printk(args)
+#else
+#define DBG(n, args...) do { } while (0)
+#define PRINTK(args...) printk(KERN_DEBUG args)
+#endif
+
+#if SMC_DEBUG_PKTS > 0
+static void PRINT_PKT(u_char *buf, int length)
+{
+ int i;
+ int remainder;
+ int lines;
+
+ lines = length / 16;
+ remainder = length % 16;
+
+ for (i = 0; i < lines ; i ++) {
+ int cur;
+ for (cur = 0; cur < 8; cur++) {
+ u_char a, b;
+ a = *buf++;
+ b = *buf++;
+ printk("%02x%02x ", a, b);
+ }
+ printk("\n");
+ }
+ for (i = 0; i < remainder/2 ; i++) {
+ u_char a, b;
+ a = *buf++;
+ b = *buf++;
+ printk("%02x%02x ", a, b);
+ }
+ printk("\n");
+}
+#else
+#define PRINT_PKT(x...) do { } while (0)
+#endif
+
+
+/* this enables an interrupt in the interrupt mask register */
+#define SMC_ENABLE_INT(x) do { \
+ unsigned int __mask; \
+ unsigned long __flags; \
+ spin_lock_irqsave(&lp->lock, __flags); \
+ __mask = SMC_GET_INT_EN(); \
+ __mask |= (x); \
+ SMC_SET_INT_EN(__mask); \
+ spin_unlock_irqrestore(&lp->lock, __flags); \
+} while (0)
+
+/* this disables an interrupt from the interrupt mask register */
+#define SMC_DISABLE_INT(x) do { \
+ unsigned int __mask; \
+ unsigned long __flags; \
+ spin_lock_irqsave(&lp->lock, __flags); \
+ __mask = SMC_GET_INT_EN(); \
+ __mask &= ~(x); \
+ SMC_SET_INT_EN(__mask); \
+ spin_unlock_irqrestore(&lp->lock, __flags); \
+} while (0)
+
+/*
+ * this does a soft reset on the device
+ */
+static void smc911x_reset(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int reg, timeout=0, resets=1;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ /* Take out of PM setting first */
+ if ((SMC_GET_PMT_CTRL() & PMT_CTRL_READY_) == 0) {
+ /* Write to the bytetest will take out of powerdown */
+ SMC_SET_BYTE_TEST(0);
+ timeout=10;
+ do {
+ udelay(10);
+ reg = SMC_GET_PMT_CTRL() & PMT_CTRL_READY_;
+ } while ( timeout-- && !reg);
+ if (timeout == 0) {
+ PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name);
+ return;
+ }
+ }
+
+ /* Disable all interrupts */
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_SET_INT_EN(0);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ while (resets--) {
+ SMC_SET_HW_CFG(HW_CFG_SRST_);
+ timeout=10;
+ do {
+ udelay(10);
+ reg = SMC_GET_HW_CFG();
+ /* If chip indicates reset timeout then try again */
+ if (reg & HW_CFG_SRST_TO_) {
+ PRINTK("%s: chip reset timeout, retrying...\n", dev->name);
+ resets++;
+ break;
+ }
+ } while ( timeout-- && (reg & HW_CFG_SRST_));
+ }
+ if (timeout == 0) {
+ PRINTK("%s: smc911x_reset timeout waiting for reset\n", dev->name);
+ return;
+ }
+
+ /* make sure EEPROM has finished loading before setting GPIO_CFG */
+ timeout=1000;
+ while ( timeout-- && (SMC_GET_E2P_CMD() & E2P_CMD_EPC_BUSY_)) {
+ udelay(10);
+ }
+ if (timeout == 0){
+ PRINTK("%s: smc911x_reset timeout waiting for EEPROM busy\n", dev->name);
+ return;
+ }
+
+ /* Initialize interrupts */
+ SMC_SET_INT_EN(0);
+ SMC_ACK_INT(-1);
+
+ /* Reset the FIFO level and flow control settings */
+ SMC_SET_HW_CFG((lp->tx_fifo_kb & 0xF) << 16);
+//TODO: Figure out what appropriate pause time is
+ SMC_SET_FLOW(FLOW_FCPT_ | FLOW_FCEN_);
+ SMC_SET_AFC_CFG(lp->afc_cfg);
+
+
+ /* Set to LED outputs */
+ SMC_SET_GPIO_CFG(0x70070000);
+
+ /*
+ * Deassert IRQ for 1*10us for edge type interrupts
+ * and drive IRQ pin push-pull
+ */
+ SMC_SET_IRQ_CFG( (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_ );
+
+ /* clear anything saved */
+ if (lp->pending_tx_skb != NULL) {
+ dev_kfree_skb (lp->pending_tx_skb);
+ lp->pending_tx_skb = NULL;
+ lp->stats.tx_errors++;
+ lp->stats.tx_aborted_errors++;
+ }
+}
+
+/*
+ * Enable Interrupts, Receive, and Transmit
+ */
+static void smc911x_enable(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned mask, cfg, cr;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ SMC_SET_MAC_ADDR(dev->dev_addr);
+
+ /* Enable TX */
+ cfg = SMC_GET_HW_CFG();
+ cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF;
+ cfg |= HW_CFG_SF_;
+ SMC_SET_HW_CFG(cfg);
+ SMC_SET_FIFO_TDA(0xFF);
+ /* Update TX stats on every 64 packets received or every 1 sec */
+ SMC_SET_FIFO_TSL(64);
+ SMC_SET_GPT_CFG(GPT_CFG_TIMER_EN_ | 10000);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_GET_MAC_CR(cr);
+ cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_;
+ SMC_SET_MAC_CR(cr);
+ SMC_SET_TX_CFG(TX_CFG_TX_ON_);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ /* Add 2 byte padding to start of packets */
+ SMC_SET_RX_CFG((2<<8) & RX_CFG_RXDOFF_);
+
+ /* Turn on receiver and enable RX */
+ if (cr & MAC_CR_RXEN_)
+ DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_SET_MAC_CR( cr | MAC_CR_RXEN_ );
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ /* Interrupt on every received packet */
+ SMC_SET_FIFO_RSA(0x01);
+ SMC_SET_FIFO_RSL(0x00);
+
+ /* now, enable interrupts */
+ mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ |
+ INT_EN_GPT_INT_EN_ | INT_EN_RXDFH_INT_EN_ | INT_EN_RXE_EN_ |
+ INT_EN_PHY_INT_EN_;
+ if (IS_REV_A(lp->revision))
+ mask|=INT_EN_RDFL_EN_;
+ else {
+ mask|=INT_EN_RDFO_EN_;
+ }
+ SMC_ENABLE_INT(mask);
+}
+
+/*
+ * this puts the device in an inactive state
+ */
+static void smc911x_shutdown(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned cr;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __FUNCTION__);
+
+ /* Disable IRQ's */
+ SMC_SET_INT_EN(0);
+
+ /* Turn off Rx and Tx */
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_GET_MAC_CR(cr);
+ cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_);
+ SMC_SET_MAC_CR(cr);
+ SMC_SET_TX_CFG(TX_CFG_STOP_TX_);
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+static inline void smc911x_drop_pkt(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int fifo_count, timeout, reg;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __FUNCTION__);
+ fifo_count = SMC_GET_RX_FIFO_INF() & 0xFFFF;
+ if (fifo_count <= 4) {
+ /* Manually dump the packet data */
+ while (fifo_count--)
+ SMC_GET_RX_FIFO();
+ } else {
+ /* Fast forward through the bad packet */
+ SMC_SET_RX_DP_CTRL(RX_DP_CTRL_FFWD_BUSY_);
+ timeout=50;
+ do {
+ udelay(10);
+ reg = SMC_GET_RX_DP_CTRL() & RX_DP_CTRL_FFWD_BUSY_;
+ } while ( timeout-- && reg);
+ if (timeout == 0) {
+ PRINTK("%s: timeout waiting for RX fast forward\n", dev->name);
+ }
+ }
+}
+
+/*
+ * This is the procedure to handle the receipt of a packet.
+ * It should be called after checking for packet presence in
+ * the RX status FIFO. It must be called with the spin lock
+ * already held.
+ */
+static inline void smc911x_rcv(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int pkt_len, status;
+ struct sk_buff *skb;
+ unsigned char *data;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
+ dev->name, __FUNCTION__);
+ status = SMC_GET_RX_STS_FIFO();
+ DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n",
+ dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
+ pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
+ if (status & RX_STS_ES_) {
+ /* Deal with a bad packet */
+ lp->stats.rx_errors++;
+ if (status & RX_STS_CRC_ERR_)
+ lp->stats.rx_crc_errors++;
+ else {
+ if (status & RX_STS_LEN_ERR_)
+ lp->stats.rx_length_errors++;
+ if (status & RX_STS_MCAST_)
+ lp->stats.multicast++;
+ }
+ /* Remove the bad packet data from the RX FIFO */
+ smc911x_drop_pkt(dev);
+ } else {
+ /* Receive a valid packet */
+ /* Alloc a buffer with extra room for DMA alignment */
+ skb=dev_alloc_skb(pkt_len+32);
+ if (unlikely(skb == NULL)) {
+ PRINTK( "%s: Low memory, rcvd packet dropped.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ smc911x_drop_pkt(dev);
+ return;
+ }
+ /* Align IP header to 32 bits.
+ * Note that the device is configured to add a 2-byte
+ * padding to the packet start, so we really
+ * want to write to the original data pointer */
+ data = skb->data;
+ skb_reserve(skb, 2);
+ skb_put(skb,pkt_len-4);
+#ifdef SMC_USE_DMA
+ {
+ unsigned int fifo;
+ /* Lower the FIFO threshold if possible */
+ fifo = SMC_GET_FIFO_INT();
+ if (fifo & 0xFF) fifo--;
+ DBG(SMC_DEBUG_RX, "%s: Setting RX stat FIFO threshold to %d\n",
+ dev->name, fifo & 0xff);
+ SMC_SET_FIFO_INT(fifo);
+ /* Setup RX DMA */
+ SMC_SET_RX_CFG(RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_));
+ lp->rxdma_active = 1;
+ lp->current_rx_skb = skb;
+ SMC_PULL_DATA(data, (pkt_len+2+15) & ~15);
+ /* Packet processing deferred to DMA RX interrupt */
+ }
+#else
+ SMC_SET_RX_CFG(RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
+ SMC_PULL_DATA(data, pkt_len+2+3);
+
+ DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name);
+ PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
+ dev->last_rx = jiffies;
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len-4;
+#endif
+ }
+}
+
+/*
+ * This is called to actually send a packet to the chip.
+ */
+static void smc911x_hardware_send_pkt(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ struct sk_buff *skb;
+ unsigned int cmdA, cmdB, len;
+ unsigned char *buf;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __FUNCTION__);
+ BUG_ON(lp->pending_tx_skb == NULL);
+
+ skb = lp->pending_tx_skb;
+ lp->pending_tx_skb = NULL;
+
+ /* cmdA [25:24] data alignment, [20:16] start offset, [10:0] buffer length */
+ /* cmdB [31:16] pkt tag, [10:0] length */
+#ifdef SMC_USE_DMA
+ /* 16 byte buffer alignment mode */
+ buf = (char*)((u32)(skb->data) & ~0xF);
+ len = (skb->len + 0xF + ((u32)skb->data & 0xF)) & ~0xF;
+ cmdA = (1<<24) | (((u32)skb->data & 0xF)<<16) |
+ TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
+ skb->len;
+#else
+ buf = (char*)((u32)skb->data & ~0x3);
+ len = (skb->len + 3 + ((u32)skb->data & 3)) & ~0x3;
+ cmdA = (((u32)skb->data & 0x3) << 16) |
+ TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
+ skb->len;
+#endif
+ /* tag is packet length so we can use this in stats update later */
+ cmdB = (skb->len << 16) | (skb->len & 0x7FF);
+
+ DBG(SMC_DEBUG_TX, "%s: TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
+ dev->name, len, len, buf, cmdA, cmdB);
+ SMC_SET_TX_FIFO(cmdA);
+ SMC_SET_TX_FIFO(cmdB);
+
+ DBG(SMC_DEBUG_PKTS, "%s: Transmitted packet\n", dev->name);
+ PRINT_PKT(buf, len <= 64 ? len : 64);
+
+ /* Send pkt via PIO or DMA */
+#ifdef SMC_USE_DMA
+ lp->current_tx_skb = skb;
+ SMC_PUSH_DATA(buf, len);
+ /* DMA complete IRQ will free buffer and set jiffies */
+#else
+ SMC_PUSH_DATA(buf, len);
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb);
+#endif
+ spin_lock_irqsave(&lp->lock, flags);
+ if (!lp->tx_throttle) {
+ netif_wake_queue(dev);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ SMC_ENABLE_INT(INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_);
+}
+
+/*
+ * Since I am not sure if I will have enough room in the chip's ram
+ * to store the packet, I call this routine, which either sends it
+ * now or sets the card to generate an interrupt when it is ready
+ * for the packet.
+ */
+static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int free;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
+ dev->name, __FUNCTION__);
+
+ BUG_ON(lp->pending_tx_skb != NULL);
+
+ free = SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TDFREE_;
+ DBG(SMC_DEBUG_TX, "%s: TX free space %d\n", dev->name, free);
+
+ /* Turn off the flow when running out of space in FIFO */
+ if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
+ DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n",
+ dev->name, free);
+ spin_lock_irqsave(&lp->lock, flags);
+ /* Reenable when at least 1 packet of size MTU present */
+ SMC_SET_FIFO_TDA((SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
+ lp->tx_throttle = 1;
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+
+ /* Drop packets when we run out of space in TX FIFO
+ * Account for overhead required for:
+ *
+ * Tx command words 8 bytes
+ * Start offset 15 bytes
+ * End padding 15 bytes
+ */
+ if (unlikely(free < (skb->len + 8 + 15 + 15))) {
+ printk("%s: No Tx free space %d < %d\n",
+ dev->name, free, skb->len);
+ lp->pending_tx_skb = NULL;
+ lp->stats.tx_errors++;
+ lp->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+#ifdef SMC_USE_DMA
+ {
+ /* If the DMA is already running then defer this packet Tx until
+ * the DMA IRQ starts it
+ */
+ spin_lock_irqsave(&lp->lock, flags);
+ if (lp->txdma_active) {
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name);
+ lp->pending_tx_skb = skb;
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return 0;
+ } else {
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name);
+ lp->txdma_active = 1;
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+#endif
+ lp->pending_tx_skb = skb;
+ smc911x_hardware_send_pkt(dev);
+
+ return 0;
+}
+
+/*
+ * This handles a TX status interrupt, which is only called when:
+ * - a TX error occurred, or
+ * - TX of a packet completed.
+ */
+static void smc911x_tx(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int tx_status;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
+ dev->name, __FUNCTION__);
+
+ /* Collect the TX status */
+ while (((SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
+ DBG(SMC_DEBUG_TX, "%s: Tx stat FIFO used 0x%04x\n",
+ dev->name,
+ (SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TSUSED_) >> 16);
+ tx_status = SMC_GET_TX_STS_FIFO();
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes+=tx_status>>16;
+ DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n",
+ dev->name, (tx_status & 0xffff0000) >> 16,
+ tx_status & 0x0000ffff);
+ /* count Tx errors, but ignore lost carrier errors when in
+ * full-duplex mode */
+ if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx &&
+ !(tx_status & 0x00000306))) {
+ lp->stats.tx_errors++;
+ }
+ if (tx_status & TX_STS_MANY_COLL_) {
+ lp->stats.collisions+=16;
+ lp->stats.tx_aborted_errors++;
+ } else {
+ lp->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3;
+ }
+ /* carrier error only has meaning for half-duplex communication */
+ if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) &&
+ !lp->ctl_rfduplx) {
+ lp->stats.tx_carrier_errors++;
+ }
+ if (tx_status & TX_STS_LATE_COLL_) {
+ lp->stats.collisions++;
+ lp->stats.tx_aborted_errors++;
+ }
+ }
+}
+
+
+/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
+/*
+ * Reads a register from the MII Management serial interface
+ */
+
+static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
+{
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int phydata;
+
+ SMC_GET_MII(phyreg, phyaddr, phydata);
+
+ DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
+ __FUNCTION__, phyaddr, phyreg, phydata);
+ return phydata;
+}
+
+
+/*
+ * Writes a register to the MII Management serial interface
+ */
+static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
+ int phydata)
+{
+ unsigned long ioaddr = dev->base_addr;
+
+ DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+ __FUNCTION__, phyaddr, phyreg, phydata);
+
+ SMC_SET_MII(phyreg, phyaddr, phydata);
+}
+
+/*
+ * Finds and reports the PHY address (115 and 117 have an external
+ * PHY interface; 118 has an internal PHY only).
+ */
+static void smc911x_phy_detect(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ int phyaddr;
+ unsigned int cfg, id1, id2;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ lp->phy_type = 0;
+
+ /*
+	 * Scan PHY addresses 1 through 31 if necessary; the
+	 * internal PHY is always at address 1.
+ */
+ switch(lp->version) {
+ case 0x115:
+ case 0x117:
+ cfg = SMC_GET_HW_CFG();
+ if (cfg & HW_CFG_EXT_PHY_DET_) {
+ cfg &= ~HW_CFG_PHY_CLK_SEL_;
+ cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_;
+ SMC_SET_HW_CFG(cfg);
+ udelay(10); /* Wait for clocks to stop */
+
+ cfg |= HW_CFG_EXT_PHY_EN_;
+ SMC_SET_HW_CFG(cfg);
+ udelay(10); /* Wait for clocks to stop */
+
+ cfg &= ~HW_CFG_PHY_CLK_SEL_;
+ cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_;
+ SMC_SET_HW_CFG(cfg);
+ udelay(10); /* Wait for clocks to stop */
+
+ cfg |= HW_CFG_SMI_SEL_;
+ SMC_SET_HW_CFG(cfg);
+
+ for (phyaddr = 1; phyaddr < 32; ++phyaddr) {
+
+ /* Read the PHY identifiers */
+ SMC_GET_PHY_ID1(phyaddr & 31, id1);
+ SMC_GET_PHY_ID2(phyaddr & 31, id2);
+
+ /* Make sure it is a valid identifier */
+ if (id1 != 0x0000 && id1 != 0xffff &&
+ id1 != 0x8000 && id2 != 0x0000 &&
+ id2 != 0xffff && id2 != 0x8000) {
+ /* Save the PHY's address */
+ lp->mii.phy_id = phyaddr & 31;
+ lp->phy_type = id1 << 16 | id2;
+ break;
+ }
+ }
+ }
+ default:
+ /* Internal media only */
+ SMC_GET_PHY_ID1(1, id1);
+ SMC_GET_PHY_ID2(1, id2);
+ /* Save the PHY's address */
+ lp->mii.phy_id = 1;
+ lp->phy_type = id1 << 16 | id2;
+ }
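+	/* phy_type packs PHY ID1 in the upper 16 bits and ID2 in the lower
+	 * 16 bits; smc911x_probe() later masks off the low (revision) byte
+	 * and compares against LAN911X_INTERNAL_PHY_ID to report an
+	 * internal PHY.
+	 */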
+
+	DBG(SMC_DEBUG_MISC, "%s: phy_id1=0x%x, phy_id2=0x%x phyaddr=%d\n",
+ dev->name, id1, id2, lp->mii.phy_id);
+}
+
+/*
+ * Sets the PHY to a configuration as determined by the user.
+ * Called with spin_lock held.
+ */
+static int smc911x_phy_fixed(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int phyaddr = lp->mii.phy_id;
+ int bmcr;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ /* Enter Link Disable state */
+ SMC_GET_PHY_BMCR(phyaddr, bmcr);
+ bmcr |= BMCR_PDOWN;
+ SMC_SET_PHY_BMCR(phyaddr, bmcr);
+
+ /*
+ * Set our fixed capabilities
+ * Disable auto-negotiation
+ */
+ bmcr &= ~BMCR_ANENABLE;
+ if (lp->ctl_rfduplx)
+ bmcr |= BMCR_FULLDPLX;
+
+ if (lp->ctl_rspeed == 100)
+ bmcr |= BMCR_SPEED100;
+
+ /* Write our capabilities to the phy control register */
+ SMC_SET_PHY_BMCR(phyaddr, bmcr);
+
+ /* Re-Configure the Receive/Phy Control register */
+ bmcr &= ~BMCR_PDOWN;
+ SMC_SET_PHY_BMCR(phyaddr, bmcr);
+
+ return 1;
+}
+
+/*
+ * smc911x_phy_reset - reset the phy
+ * @dev: net device
+ * @phy: phy address
+ *
+ * Issue a software reset for the specified PHY and
+ * wait up to 100ms for the reset to complete. We should
+ * not access the PHY for 50ms after issuing the reset.
+ *
+ * The time to wait appears to be dependent on the PHY.
+ *
+ */
+static int smc911x_phy_reset(struct net_device *dev, int phy)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int timeout;
+ unsigned long flags;
+ unsigned int reg;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ reg = SMC_GET_PMT_CTRL();
+ reg &= ~0xfffff030;
+ reg |= PMT_CTRL_PHY_RST_;
+ SMC_SET_PMT_CTRL(reg);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ for (timeout = 2; timeout; timeout--) {
+ msleep(50);
+ spin_lock_irqsave(&lp->lock, flags);
+ reg = SMC_GET_PMT_CTRL();
+ spin_unlock_irqrestore(&lp->lock, flags);
+ if (!(reg & PMT_CTRL_PHY_RST_)) {
+			/* An extra delay is required because the PHY may
+			 * not have completed its reset when PHY_BCR_RESET_
+			 * is cleared. 256us should suffice, but use 500us
+			 * to be safe.
+ */
+ udelay(500);
+ break;
+ }
+ }
+
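+	/* A non-zero value here means PMT_CTRL_PHY_RST_ never cleared
+	 * within the ~100ms poll window above, which the caller treats
+	 * as a PHY reset timeout.
+	 */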
+ return reg & PMT_CTRL_PHY_RST_;
+}
+
+/*
+ * smc911x_phy_powerdown - powerdown phy
+ * @dev: net device
+ * @phy: phy address
+ *
+ * Power down the specified PHY
+ */
+static void smc911x_phy_powerdown(struct net_device *dev, int phy)
+{
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int bmcr;
+
+ /* Enter Link Disable state */
+ SMC_GET_PHY_BMCR(phy, bmcr);
+ bmcr |= BMCR_PDOWN;
+ SMC_SET_PHY_BMCR(phy, bmcr);
+}
+
+/*
+ * smc911x_phy_check_media - check the media status and adjust BMCR
+ * @dev: net device
+ * @init: set true for initialisation
+ *
+ * Select duplex mode depending on negotiation state. This
+ * also updates our carrier state.
+ */
+static void smc911x_phy_check_media(struct net_device *dev, int init)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int phyaddr = lp->mii.phy_id;
+ unsigned int bmcr, cr;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
+ /* duplex state has changed */
+ SMC_GET_PHY_BMCR(phyaddr, bmcr);
+ SMC_GET_MAC_CR(cr);
+ if (lp->mii.full_duplex) {
+ DBG(SMC_DEBUG_MISC, "%s: Configuring for full-duplex mode\n", dev->name);
+ bmcr |= BMCR_FULLDPLX;
+ cr |= MAC_CR_RCVOWN_;
+ } else {
+ DBG(SMC_DEBUG_MISC, "%s: Configuring for half-duplex mode\n", dev->name);
+ bmcr &= ~BMCR_FULLDPLX;
+ cr &= ~MAC_CR_RCVOWN_;
+ }
+ SMC_SET_PHY_BMCR(phyaddr, bmcr);
+ SMC_SET_MAC_CR(cr);
+ }
+}
+
+/*
+ * Configures the specified PHY through the MII management interface
+ * using Autonegotiation.
+ * Calls smc911x_phy_fixed() if the user has requested a certain config.
+ * If RPC ANEG bit is set, the media selection is dependent purely on
+ * the selection by the MII (either in the MII BMCR reg or the result
+ * of autonegotiation.) If the RPC ANEG bit is cleared, the selection
+ * is controlled by the RPC SPEED and RPC DPLX bits.
+ */
+static void smc911x_phy_configure(void *data)
+{
+ struct net_device *dev = data;
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int phyaddr = lp->mii.phy_id;
+ int my_phy_caps; /* My PHY capabilities */
+ int my_ad_caps; /* My Advertised capabilities */
+ int status;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__);
+
+ /*
+ * We should not be called if phy_type is zero.
+ */
+ if (lp->phy_type == 0)
+ goto smc911x_phy_configure_exit;
+
+ if (smc911x_phy_reset(dev, phyaddr)) {
+ printk("%s: PHY reset timed out\n", dev->name);
+ goto smc911x_phy_configure_exit;
+ }
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /*
+ * Enable PHY Interrupts (for register 18)
+ * Interrupts listed here are enabled
+ */
+ SMC_SET_PHY_INT_MASK(phyaddr, PHY_INT_MASK_ENERGY_ON_ |
+ PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ |
+ PHY_INT_MASK_LINK_DOWN_);
+
+ /* If the user requested no auto neg, then go set his request */
+ if (lp->mii.force_media) {
+ smc911x_phy_fixed(dev);
+ goto smc911x_phy_configure_exit;
+ }
+
+ /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
+ SMC_GET_PHY_BMSR(phyaddr, my_phy_caps);
+ if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
+ printk(KERN_INFO "Auto negotiation NOT supported\n");
+ smc911x_phy_fixed(dev);
+ goto smc911x_phy_configure_exit;
+ }
+
+ /* CSMA capable w/ both pauses */
+ my_ad_caps = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+
+ if (my_phy_caps & BMSR_100BASE4)
+ my_ad_caps |= ADVERTISE_100BASE4;
+ if (my_phy_caps & BMSR_100FULL)
+ my_ad_caps |= ADVERTISE_100FULL;
+ if (my_phy_caps & BMSR_100HALF)
+ my_ad_caps |= ADVERTISE_100HALF;
+ if (my_phy_caps & BMSR_10FULL)
+ my_ad_caps |= ADVERTISE_10FULL;
+ if (my_phy_caps & BMSR_10HALF)
+ my_ad_caps |= ADVERTISE_10HALF;
+
+ /* Disable capabilities not selected by our user */
+ if (lp->ctl_rspeed != 100)
+ my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);
+
+ if (!lp->ctl_rfduplx)
+ my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);
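+	/* Example: a 10/100 PHY with ctl_rspeed == 100 and ctl_rfduplx == 1
+	 * ends up advertising CSMA, both pause types and all four 10/100
+	 * half/full modes; ctl_rspeed == 10 strips the 100Mbit bits and
+	 * ctl_rfduplx == 0 strips the full-duplex ones.
+	 */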
+
+ /* Update our Auto-Neg Advertisement Register */
+ SMC_SET_PHY_MII_ADV(phyaddr, my_ad_caps);
+ lp->mii.advertising = my_ad_caps;
+
+ /*
+ * Read the register back. Without this, it appears that when
+ * auto-negotiation is restarted, sometimes it isn't ready and
+ * the link does not come up.
+ */
+ udelay(10);
+ SMC_GET_PHY_MII_ADV(phyaddr, status);
+
+ DBG(SMC_DEBUG_MISC, "%s: phy caps=0x%04x\n", dev->name, my_phy_caps);
+ DBG(SMC_DEBUG_MISC, "%s: phy advertised caps=0x%04x\n", dev->name, my_ad_caps);
+
+ /* Restart auto-negotiation process in order to advertise my caps */
+ SMC_SET_PHY_BMCR(phyaddr, BMCR_ANENABLE | BMCR_ANRESTART);
+
+ smc911x_phy_check_media(dev, 1);
+
+smc911x_phy_configure_exit:
+ spin_unlock_irqrestore(&lp->lock, flags);
+ lp->work_pending = 0;
+}
+
+/*
+ * smc911x_phy_interrupt
+ *
+ * Purpose: Handle interrupts relating to PHY register 18. This is
+ * called from the "hard" interrupt handler under our private spinlock.
+ */
+static void smc911x_phy_interrupt(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int phyaddr = lp->mii.phy_id;
+ int status;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ if (lp->phy_type == 0)
+ return;
+
+ smc911x_phy_check_media(dev, 0);
+ /* read to clear status bits */
+ SMC_GET_PHY_INT_SRC(phyaddr,status);
+ DBG(SMC_DEBUG_MISC, "%s: PHY interrupt status 0x%04x\n",
+ dev->name, status & 0xffff);
+ DBG(SMC_DEBUG_MISC, "%s: AFC_CFG 0x%08x\n",
+ dev->name, SMC_GET_AFC_CFG());
+}
+
+/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
+
+/*
+ * This is the main routine of the driver, to handle the device when
+ * it needs some attention.
+ */
+static irqreturn_t smc911x_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int status, mask, timeout;
+ unsigned int rx_overrun=0, cr, pkts;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /* Spurious interrupt check */
+	if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) !=
+		(INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) {
+		spin_unlock_irqrestore(&lp->lock, flags);
+		return IRQ_NONE;
+	}
+
+ mask = SMC_GET_INT_EN();
+ SMC_SET_INT_EN(0);
+
+ /* set a timeout value, so I don't stay here forever */
+ timeout = 8;
+
+
+ do {
+ status = SMC_GET_INT();
+
+ DBG(SMC_DEBUG_MISC, "%s: INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
+ dev->name, status, mask, status & ~mask);
+
+ status &= mask;
+ if (!status)
+ break;
+
+ /* Handle SW interrupt condition */
+ if (status & INT_STS_SW_INT_) {
+ SMC_ACK_INT(INT_STS_SW_INT_);
+ mask &= ~INT_EN_SW_INT_EN_;
+ }
+ /* Handle various error conditions */
+ if (status & INT_STS_RXE_) {
+ SMC_ACK_INT(INT_STS_RXE_);
+ lp->stats.rx_errors++;
+ }
+ if (status & INT_STS_RXDFH_INT_) {
+ SMC_ACK_INT(INT_STS_RXDFH_INT_);
+ lp->stats.rx_dropped+=SMC_GET_RX_DROP();
+ }
+		/* Undocumented interrupt - what is the right thing to do here? */
+ if (status & INT_STS_RXDF_INT_) {
+ SMC_ACK_INT(INT_STS_RXDF_INT_);
+ }
+
+ /* Rx Data FIFO exceeds set level */
+ if (status & INT_STS_RDFL_) {
+ if (IS_REV_A(lp->revision)) {
+ rx_overrun=1;
+ SMC_GET_MAC_CR(cr);
+ cr &= ~MAC_CR_RXEN_;
+ SMC_SET_MAC_CR(cr);
+ DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
+ lp->stats.rx_errors++;
+ lp->stats.rx_fifo_errors++;
+ }
+ SMC_ACK_INT(INT_STS_RDFL_);
+ }
+ if (status & INT_STS_RDFO_) {
+ if (!IS_REV_A(lp->revision)) {
+ SMC_GET_MAC_CR(cr);
+ cr &= ~MAC_CR_RXEN_;
+ SMC_SET_MAC_CR(cr);
+ rx_overrun=1;
+ DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
+ lp->stats.rx_errors++;
+ lp->stats.rx_fifo_errors++;
+ }
+ SMC_ACK_INT(INT_STS_RDFO_);
+ }
+ /* Handle receive condition */
+ if ((status & INT_STS_RSFL_) || rx_overrun) {
+ unsigned int fifo;
+ DBG(SMC_DEBUG_RX, "%s: RX irq\n", dev->name);
+ fifo = SMC_GET_RX_FIFO_INF();
+ pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16;
+ DBG(SMC_DEBUG_RX, "%s: Rx FIFO pkts %d, bytes %d\n",
+ dev->name, pkts, fifo & 0xFFFF );
+ if (pkts != 0) {
+#ifdef SMC_USE_DMA
+ unsigned int fifo;
+ if (lp->rxdma_active){
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
+ "%s: RX DMA active\n", dev->name);
+ /* The DMA is already running so up the IRQ threshold */
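+				/* The low byte of FIFO_INT is the RX status
+				 * FIFO level (FIFO_INT_RX_STS_LEVEL_); raising
+				 * it to the number of packets already queued
+				 * keeps RSFL quiet until the DMA completion
+				 * path has drained them.
+				 */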
+ fifo = SMC_GET_FIFO_INT() & ~0xFF;
+ fifo |= pkts & 0xFF;
+ DBG(SMC_DEBUG_RX,
+ "%s: Setting RX stat FIFO threshold to %d\n",
+ dev->name, fifo & 0xff);
+ SMC_SET_FIFO_INT(fifo);
+ } else
+#endif
+ smc911x_rcv(dev);
+ }
+ SMC_ACK_INT(INT_STS_RSFL_);
+ }
+ /* Handle transmit FIFO available */
+ if (status & INT_STS_TDFA_) {
+ DBG(SMC_DEBUG_TX, "%s: TX data FIFO space available irq\n", dev->name);
+ SMC_SET_FIFO_TDA(0xFF);
+ lp->tx_throttle = 0;
+#ifdef SMC_USE_DMA
+ if (!lp->txdma_active)
+#endif
+ netif_wake_queue(dev);
+ SMC_ACK_INT(INT_STS_TDFA_);
+ }
+ /* Handle transmit done condition */
+#if 1
+ if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) {
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC,
+ "%s: Tx stat FIFO limit (%d) /GPT irq\n",
+ dev->name, (SMC_GET_FIFO_INT() & 0x00ff0000) >> 16);
+ smc911x_tx(dev);
+ SMC_SET_GPT_CFG(GPT_CFG_TIMER_EN_ | 10000);
+ SMC_ACK_INT(INT_STS_TSFL_);
+ SMC_ACK_INT(INT_STS_TSFL_ | INT_STS_GPT_INT_);
+ }
+#else
+ if (status & INT_STS_TSFL_) {
+		DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit irq\n", dev->name);
+ smc911x_tx(dev);
+ SMC_ACK_INT(INT_STS_TSFL_);
+ }
+
+ if (status & INT_STS_GPT_INT_) {
+ DBG(SMC_DEBUG_RX, "%s: IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
+ dev->name,
+ SMC_GET_IRQ_CFG(),
+ SMC_GET_FIFO_INT(),
+ SMC_GET_RX_CFG());
+ DBG(SMC_DEBUG_RX, "%s: Rx Stat FIFO Used 0x%02x "
+ "Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
+ dev->name,
+ (SMC_GET_RX_FIFO_INF() & 0x00ff0000) >> 16,
+ SMC_GET_RX_FIFO_INF() & 0xffff,
+ SMC_GET_RX_STS_FIFO_PEEK());
+ SMC_SET_GPT_CFG(GPT_CFG_TIMER_EN_ | 10000);
+ SMC_ACK_INT(INT_STS_GPT_INT_);
+ }
+#endif
+
+		/* Handle PHY interrupt condition */
+ if (status & INT_STS_PHY_INT_) {
+ DBG(SMC_DEBUG_MISC, "%s: PHY irq\n", dev->name);
+ smc911x_phy_interrupt(dev);
+ SMC_ACK_INT(INT_STS_PHY_INT_);
+ }
+ } while (--timeout);
+
+ /* restore mask state */
+ SMC_SET_INT_EN(mask);
+
+ DBG(SMC_DEBUG_MISC, "%s: Interrupt done (%d loops)\n",
+ dev->name, 8-timeout);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef SMC_USE_DMA
+static void
+smc911x_tx_dma_irq(int dma, void *data, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct smc911x_local *lp = netdev_priv(dev);
+ struct sk_buff *skb = lp->current_tx_skb;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name);
+ /* Clear the DMA interrupt sources */
+ SMC_DMA_ACK_IRQ(dev, dma);
+ BUG_ON(skb == NULL);
+ dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
+ dev->trans_start = jiffies;
+ dev_kfree_skb_irq(skb);
+ lp->current_tx_skb = NULL;
+ if (lp->pending_tx_skb != NULL)
+ smc911x_hardware_send_pkt(dev);
+ else {
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
+ "%s: No pending Tx packets. DMA disabled\n", dev->name);
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->txdma_active = 0;
+ if (!lp->tx_throttle) {
+ netif_wake_queue(dev);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
+ "%s: TX DMA irq completed\n", dev->name);
+}
+static void
+smc911x_rx_dma_irq(int dma, void *data, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)data;
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ struct sk_buff *skb = lp->current_rx_skb;
+ unsigned long flags;
+ unsigned int pkts;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name);
+ /* Clear the DMA interrupt sources */
+ SMC_DMA_ACK_IRQ(dev, dma);
+ dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
+ BUG_ON(skb == NULL);
+ lp->current_rx_skb = NULL;
+ PRINT_PKT(skb->data, skb->len);
+ dev->last_rx = jiffies;
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += skb->len;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ pkts = (SMC_GET_RX_FIFO_INF() & RX_FIFO_INF_RXSUSED_) >> 16;
+ if (pkts != 0) {
+ smc911x_rcv(dev);
+ }else {
+ lp->rxdma_active = 0;
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
+ "%s: RX DMA irq completed. DMA RX FIFO PKTS %d\n",
+ dev->name, pkts);
+}
+#endif /* SMC_USE_DMA */
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void smc911x_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ smc911x_interrupt(dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
+/* Our watchdog timed out. Called by the networking layer */
+static void smc911x_timeout(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int status, mask;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ status = SMC_GET_INT();
+ mask = SMC_GET_INT_EN();
+ spin_unlock_irqrestore(&lp->lock, flags);
+ DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x \n",
+ dev->name, status, mask);
+
+ /* Dump the current TX FIFO contents and restart */
+ mask = SMC_GET_TX_CFG();
+ SMC_SET_TX_CFG(mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_);
+ /*
+	 * Reconfiguring the PHY doesn't seem like a bad idea here, but
+	 * smc911x_phy_configure() can sleep (it calls msleep()), and the
+	 * TX watchdog runs in a context that must not sleep. Hence we
+	 * defer the reconfiguration to a work queue.
+ */
+ if (lp->phy_type != 0) {
+ if (schedule_work(&lp->phy_configure)) {
+ lp->work_pending = 1;
+ }
+ }
+
+ /* We can accept TX packets again */
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+/*
+ * This routine will, depending on the values passed to it,
+ * either make it accept multicast packets, go into
+ * promiscuous mode (for TCPDUMP and cousins) or accept
+ * a select set of multicast packets
+ */
+static void smc911x_set_multicast_list(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int multicast_table[2];
+ unsigned int mcr, update_multicast = 0;
+ unsigned long flags;
+ /* table for flipping the order of 5 bits */
+ static const unsigned char invert5[] =
+ {0x00, 0x10, 0x08, 0x18, 0x04, 0x14, 0x0C, 0x1C,
+ 0x02, 0x12, 0x0A, 0x1A, 0x06, 0x16, 0x0E, 0x1E,
+ 0x01, 0x11, 0x09, 0x19, 0x05, 0x15, 0x0D, 0x1D,
+ 0x03, 0x13, 0x0B, 0x1B, 0x07, 0x17, 0x0F, 0x1F};
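+	/* e.g. invert5[0x01] == 0x10: 00001 becomes 10000 when the five
+	 * bits are reversed.
+	 */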
+
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_GET_MAC_CR(mcr);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ if (dev->flags & IFF_PROMISC) {
+
+ DBG(SMC_DEBUG_MISC, "%s: RCR_PRMS\n", dev->name);
+ mcr |= MAC_CR_PRMS_;
+ }
+ /*
+ * Here, I am setting this to accept all multicast packets.
+ * I don't need to zero the multicast table, because the flag is
+	 * checked before the table is ever used.
+ */
+ else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) {
+ DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name);
+ mcr |= MAC_CR_MCPAS_;
+ }
+
+ /*
+ * This sets the internal hardware table to filter out unwanted
+ * multicast packets before they take up memory.
+ *
+	 * The SMC chip uses a hash table where the high 6 bits of the CRC of
+	 * the address are the offset into the table. If that table bit is 1,
+	 * the multicast packet is accepted. Otherwise, it's dropped silently.
+	 *
+	 * To use the 6 bits as an offset into the table, the high bit selects
+	 * which of the two 32-bit registers, while the low 5 bits select the
+	 * bit within that register.
+ */
+ else if (dev->mc_count) {
+ int i;
+ struct dev_mc_list *cur_addr;
+
+		/* Set the hash perfect filtering mode */
+ mcr |= MAC_CR_HPFILT_;
+
+ /* start with a table of all zeros: reject all */
+ memset(multicast_table, 0, sizeof(multicast_table));
+
+ cur_addr = dev->mc_list;
+ for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ int position;
+
+ /* do we have a pointer here? */
+ if (!cur_addr)
+ break;
+ /* make sure this is a multicast address -
+ shouldn't this be a given if we have it here ? */
+ if (!(*cur_addr->dmi_addr & 1))
+ continue;
+
+ /* only use the low order bits */
+ position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f;
+
+ /* do some messy swapping to put the bit in the right spot */
+ multicast_table[invert5[position&0x1F]&0x1] |=
+ (1<<invert5[(position>>1)&0x1F]);
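+			/* Example: the low 6 bits of the little-endian CRC
+			 * select one of 64 filter bits, which the reversal
+			 * table above maps onto a bit of HASHL
+			 * (multicast_table[0]) or HASHH (multicast_table[1]).
+			 */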
+ }
+
+ /* be sure I get rid of flags I might have set */
+ mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
+
+ /* now, the table can be loaded into the chipset */
+ update_multicast = 1;
+ } else {
+ DBG(SMC_DEBUG_MISC, "%s: ~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n",
+ dev->name);
+ mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
+
+ /*
+ * since I'm disabling all multicast entirely, I need to
+ * clear the multicast list
+ */
+ memset(multicast_table, 0, sizeof(multicast_table));
+ update_multicast = 1;
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_SET_MAC_CR(mcr);
+ if (update_multicast) {
+ DBG(SMC_DEBUG_MISC,
+ "%s: update mcast hash table 0x%08x 0x%08x\n",
+ dev->name, multicast_table[0], multicast_table[1]);
+ SMC_SET_HASHL(multicast_table[0]);
+ SMC_SET_HASHH(multicast_table[1]);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+
+/*
+ * Open and Initialize the board
+ *
+ * Set up everything, reset the card, etc..
+ */
+static int
+smc911x_open(struct net_device *dev)
+{
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ /*
+	 * Check that the address is valid. If it's not, refuse
+ * to bring the device up. The user must specify an
+ * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
+ */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ PRINTK("%s: no valid ethernet hw addr\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* reset the hardware */
+ smc911x_reset(dev);
+
+ /* Configure the PHY, initialize the link state */
+ smc911x_phy_configure(dev);
+
+ /* Turn on Tx + Rx */
+ smc911x_enable(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/*
+ * smc911x_close
+ *
+ * this makes the board clean up everything that it can
+ * and not talk to the outside world. Caused by
+ * an 'ifconfig ethX down'
+ */
+static int smc911x_close(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ /* clear everything */
+ smc911x_shutdown(dev);
+
+ if (lp->phy_type != 0) {
+ /* We need to ensure that no calls to
+ * smc911x_phy_configure are pending.
+
+ * flush_scheduled_work() cannot be called because we
+ * are running with the netlink semaphore held (from
+ * devinet_ioctl()) and the pending work queue
+ * contains linkwatch_event() (scheduled by
+ * netif_carrier_off() above). linkwatch_event() also
+ * wants the netlink semaphore.
+ */
+ while (lp->work_pending)
+ schedule();
+ smc911x_phy_powerdown(dev, lp->mii.phy_id);
+ }
+
+ if (lp->pending_tx_skb) {
+ dev_kfree_skb(lp->pending_tx_skb);
+ lp->pending_tx_skb = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *smc911x_query_statistics(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+
+ return &lp->stats;
+}
+
+/*
+ * Ethtool support
+ */
+static int
+smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int ret, status;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+ cmd->maxtxpkt = 1;
+ cmd->maxrxpkt = 1;
+
+ if (lp->phy_type != 0) {
+ spin_lock_irqsave(&lp->lock, flags);
+ ret = mii_ethtool_gset(&lp->mii, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ } else {
+ cmd->supported = SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_TP | SUPPORTED_AUI;
+
+ if (lp->ctl_rspeed == 10)
+ cmd->speed = SPEED_10;
+ else if (lp->ctl_rspeed == 100)
+ cmd->speed = SPEED_100;
+
+ cmd->autoneg = AUTONEG_DISABLE;
+ if (lp->mii.phy_id==1)
+ cmd->transceiver = XCVR_INTERNAL;
+ else
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->port = 0;
+ SMC_GET_PHY_SPECIAL(lp->mii.phy_id, status);
+ cmd->duplex =
+ (status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int
+smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int ret;
+ unsigned long flags;
+
+ if (lp->phy_type != 0) {
+ spin_lock_irqsave(&lp->lock, flags);
+ ret = mii_ethtool_sset(&lp->mii, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ } else {
+ if (cmd->autoneg != AUTONEG_DISABLE ||
+ cmd->speed != SPEED_10 ||
+ (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
+ (cmd->port != PORT_TP && cmd->port != PORT_AUI))
+ return -EINVAL;
+
+ lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
+
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void
+smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, CARDNAME, sizeof(info->driver));
+ strncpy(info->version, version, sizeof(info->version));
+ strncpy(info->bus_info, dev->class_dev.dev->bus_id, sizeof(info->bus_info));
+}
+
+static int smc911x_ethtool_nwayreset(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int ret = -EINVAL;
+ unsigned long flags;
+
+ if (lp->phy_type != 0) {
+ spin_lock_irqsave(&lp->lock, flags);
+ ret = mii_nway_restart(&lp->mii);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+
+ return ret;
+}
+
+static u32 smc911x_ethtool_getmsglevel(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ return lp->msg_enable;
+}
+
+static void smc911x_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ lp->msg_enable = level;
+}
+
+static int smc911x_ethtool_getregslen(struct net_device *dev)
+{
+ /* System regs + MAC regs + PHY regs */
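+	/* e.g. the system block alone spans ID_REV (0x50) through E2P_CMD
+	 * (0xB0): (0xB0 - 0x50)/4 + 1 = 25 32-bit registers.
+	 */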
+ return (((E2P_CMD - ID_REV)/4 + 1) +
+ (WUCSR - MAC_CR)+1 + 32) * sizeof(u32);
+}
+
+static void smc911x_ethtool_getregs(struct net_device *dev,
+ struct ethtool_regs* regs, void *buf)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long flags;
+ u32 reg,i,j=0;
+ u32 *data = (u32*)buf;
+
+ regs->version = lp->version;
+ for(i=ID_REV;i<=E2P_CMD;i+=4) {
+ data[j++] = SMC_inl(ioaddr,i);
+ }
+ for(i=MAC_CR;i<=WUCSR;i++) {
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_GET_MAC_CSR(i, reg);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ data[j++] = reg;
+ }
+ for(i=0;i<=31;i++) {
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_GET_MII(i, lp->mii.phy_id, reg);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ data[j++] = reg & 0xFFFF;
+ }
+}
+
+static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int timeout;
+ int e2p_cmd;
+
+ e2p_cmd = SMC_GET_E2P_CMD();
+ for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
+ if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
+ PRINTK("%s: %s timeout waiting for EEPROM to respond\n",
+ dev->name, __FUNCTION__);
+ return -EFAULT;
+ }
+ mdelay(1);
+ e2p_cmd = SMC_GET_E2P_CMD();
+ }
+ if (timeout == 0) {
+ PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n",
+ dev->name, __FUNCTION__);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
+ int cmd, int addr)
+{
+ unsigned long ioaddr = dev->base_addr;
+ int ret;
+
+ if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
+ return ret;
+ SMC_SET_E2P_CMD(E2P_CMD_EPC_BUSY_ |
+ ((cmd) & (0x7<<28)) |
+ ((addr) & 0xFF));
+ return 0;
+}
+
+static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
+ u8 *data)
+{
+ unsigned long ioaddr = dev->base_addr;
+ int ret;
+
+ if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
+ return ret;
+ *data = SMC_GET_E2P_DATA();
+ return 0;
+}
+
+static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
+ u8 data)
+{
+ unsigned long ioaddr = dev->base_addr;
+ int ret;
+
+ if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
+ return ret;
+ SMC_SET_E2P_DATA(data);
+ return 0;
+}
+
+static int smc911x_ethtool_geteeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ u8 eebuf[SMC911X_EEPROM_LEN];
+ int i, ret;
+
+ for(i=0;i<SMC911X_EEPROM_LEN;i++) {
+ if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_READ_, i ))!=0)
+ return ret;
+ if ((ret=smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i]))!=0)
+ return ret;
+ }
+ memcpy(data, eebuf+eeprom->offset, eeprom->len);
+ return 0;
+}
+
+static int smc911x_ethtool_seteeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ int i, ret;
+
+ /* Enable erase */
+ if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_EWEN_, 0 ))!=0)
+ return ret;
+ for(i=eeprom->offset;i<(eeprom->offset+eeprom->len);i++) {
+ /* erase byte */
+ if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_ERASE_, i ))!=0)
+ return ret;
+ /* write byte */
+ if ((ret=smc911x_ethtool_write_eeprom_byte(dev, *data))!=0)
+ return ret;
+ if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0)
+ return ret;
+ }
+ return 0;
+}
+
+static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
+{
+ return SMC911X_EEPROM_LEN;
+}
+
+static struct ethtool_ops smc911x_ethtool_ops = {
+ .get_settings = smc911x_ethtool_getsettings,
+ .set_settings = smc911x_ethtool_setsettings,
+ .get_drvinfo = smc911x_ethtool_getdrvinfo,
+ .get_msglevel = smc911x_ethtool_getmsglevel,
+ .set_msglevel = smc911x_ethtool_setmsglevel,
+ .nway_reset = smc911x_ethtool_nwayreset,
+ .get_link = ethtool_op_get_link,
+ .get_regs_len = smc911x_ethtool_getregslen,
+ .get_regs = smc911x_ethtool_getregs,
+ .get_eeprom_len = smc911x_ethtool_geteeprom_len,
+ .get_eeprom = smc911x_ethtool_geteeprom,
+ .set_eeprom = smc911x_ethtool_seteeprom,
+};
+
+/*
+ * smc911x_findirq
+ *
+ * This routine has a simple purpose -- make the SMC chip generate an
+ * interrupt, so an auto-detect routine can detect it and find the IRQ.
+ */
+static int __init smc911x_findirq(unsigned long ioaddr)
+{
+ int timeout = 20;
+ unsigned long cookie;
+
+ DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+
+ cookie = probe_irq_on();
+
+ /*
+ * Force a SW interrupt
+ */
+
+ SMC_SET_INT_EN(INT_EN_SW_INT_EN_);
+
+ /*
+ * Wait until positive that the interrupt has been generated
+ */
+ do {
+ int int_status;
+ udelay(10);
+ int_status = SMC_GET_INT_EN();
+ if (int_status & INT_EN_SW_INT_EN_)
+ break; /* got the interrupt */
+ } while (--timeout);
+
+ /*
+	 * There is really nothing that I can do here if the timeout fails,
+	 * as probe_irq_off() will return 0 anyway, which is what I want
+	 * in this case. Plus, the cleanup is needed in both
+	 * cases.
+ */
+
+ /* and disable all interrupts again */
+ SMC_SET_INT_EN(0);
+
+ /* and return what I found */
+ return probe_irq_off(cookie);
+}
+
+/*
+ * Function: smc911x_probe(struct net_device *dev, unsigned long ioaddr)
+ *
+ * Purpose:
+ * Tests to see if a given ioaddr points to an SMC911x chip.
+ * Returns a 0 on success
+ *
+ * Algorithm:
+ * (1) see if the endian word is OK
+ * (2) see if I recognize the chip ID in the appropriate register
+ *
+ * Here I do typical initialization tasks.
+ *
+ * o Initialize the structure if needed
+ * o print out my vanity message if not done so already
+ * o print out what type of hardware is detected
+ * o print out the ethernet address
+ * o find the IRQ
+ * o set up my private data
+ * o configure the dev structure with my subroutines
+ * o actually GRAB the irq.
+ * o GRAB the region
+ */
+static int __init smc911x_probe(struct net_device *dev, unsigned long ioaddr)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int i, retval;
+ unsigned int val, chip_id, revision;
+ const char *version_string;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ /* First, see if the endian word is recognized */
+ val = SMC_GET_BYTE_TEST();
+ DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val);
+ if (val != 0x87654321) {
+		printk(KERN_ERR "Invalid chip endian 0x%08x\n",val);
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+ /*
+	 * Check if the chip ID register is something that I
+	 * recognize. More IDs may need to be added here later,
+	 * as future revisions of the chip appear.
+ */
+ chip_id = SMC_GET_PN();
+ DBG(SMC_DEBUG_MISC, "%s: id probe returned 0x%04x\n", CARDNAME, chip_id);
+ for(i=0;chip_ids[i].id != 0; i++) {
+ if (chip_ids[i].id == chip_id) break;
+ }
+ if (!chip_ids[i].id) {
+ printk(KERN_ERR "Unknown chip ID %04x\n", chip_id);
+ retval = -ENODEV;
+ goto err_out;
+ }
+ version_string = chip_ids[i].name;
+
+ revision = SMC_GET_REV();
+ DBG(SMC_DEBUG_MISC, "%s: revision = 0x%04x\n", CARDNAME, revision);
+
+ /* At this point I'll assume that the chip is an SMC911x. */
+ DBG(SMC_DEBUG_MISC, "%s: Found a %s\n", CARDNAME, chip_ids[i].name);
+
+ /* Validate the TX FIFO size requested */
+ if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) {
+ printk(KERN_ERR "Invalid TX FIFO size requested %d\n", tx_fifo_kb);
+ retval = -EINVAL;
+ goto err_out;
+ }
+
+ /* fill in some of the fields */
+ dev->base_addr = ioaddr;
+ lp->version = chip_ids[i].id;
+ lp->revision = revision;
+ lp->tx_fifo_kb = tx_fifo_kb;
+ /* Reverse calculate the RX FIFO size from the TX */
+ lp->tx_fifo_size=(lp->tx_fifo_kb<<10) - 512;
+ lp->rx_fifo_size= ((0x4000 - 512 - lp->tx_fifo_size) / 16) * 15;
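+	/* Example: tx_fifo_kb == 8 gives tx_fifo_size = 8*1024 - 512 = 7680
+	 * and rx_fifo_size = ((0x4000 - 512 - 7680) / 16) * 15 = 7680,
+	 * matching the 7680 byte RX data FIFO noted for case 8 below.
+	 */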
+
+ /* Set the automatic flow control values */
+ switch(lp->tx_fifo_kb) {
+ /*
+ * AFC_HI is about ((Rx Data Fifo Size)*2/3)/64
+ * AFC_LO is AFC_HI/2
+ * BACK_DUR is about 5uS*(AFC_LO) rounded down
+ */
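+	/*
+	 * Example: with an 8kB TX FIFO the RX data FIFO is 7680 bytes, so
+	 * AFC_HI = (7680*2/3)/64 = 80 = 0x50 and AFC_LO = 40 = 0x28, which
+	 * is where the 0x0050287F value for case 8 comes from.
+	 */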
+ case 2:/* 13440 Rx Data Fifo Size */
+ lp->afc_cfg=0x008C46AF;break;
+ case 3:/* 12480 Rx Data Fifo Size */
+ lp->afc_cfg=0x0082419F;break;
+ case 4:/* 11520 Rx Data Fifo Size */
+ lp->afc_cfg=0x00783C9F;break;
+ case 5:/* 10560 Rx Data Fifo Size */
+ lp->afc_cfg=0x006E374F;break;
+ case 6:/* 9600 Rx Data Fifo Size */
+ lp->afc_cfg=0x0064328F;break;
+ case 7:/* 8640 Rx Data Fifo Size */
+ lp->afc_cfg=0x005A2D7F;break;
+ case 8:/* 7680 Rx Data Fifo Size */
+ lp->afc_cfg=0x0050287F;break;
+ case 9:/* 6720 Rx Data Fifo Size */
+ lp->afc_cfg=0x0046236F;break;
+ case 10:/* 5760 Rx Data Fifo Size */
+ lp->afc_cfg=0x003C1E6F;break;
+ case 11:/* 4800 Rx Data Fifo Size */
+ lp->afc_cfg=0x0032195F;break;
+ /*
+ * AFC_HI is ~1520 bytes less than RX Data Fifo Size
+ * AFC_LO is AFC_HI/2
+ * BACK_DUR is about 5uS*(AFC_LO) rounded down
+ */
+ case 12:/* 3840 Rx Data Fifo Size */
+ lp->afc_cfg=0x0024124F;break;
+ case 13:/* 2880 Rx Data Fifo Size */
+ lp->afc_cfg=0x0015073F;break;
+ case 14:/* 1920 Rx Data Fifo Size */
+ lp->afc_cfg=0x0006032F;break;
+ default:
+ PRINTK("%s: ERROR -- no AFC_CFG setting found",
+ dev->name);
+ break;
+ }
+
+ DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX,
+ "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
+ lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);
+
+ spin_lock_init(&lp->lock);
+
+ /* Get the MAC address */
+ SMC_GET_MAC_ADDR(dev->dev_addr);
+
+ /* now, reset the chip, and put it into a known state */
+ smc911x_reset(dev);
+
+ /*
+ * If dev->irq is 0, then the device has to be banged on to see
+ * what the IRQ is.
+ *
+ * Specifying an IRQ is done with the assumption that the user knows
+ * what (s)he is doing. No checking is done!!!!
+ */
+ if (dev->irq < 1) {
+ int trials;
+
+ trials = 3;
+ while (trials--) {
+ dev->irq = smc911x_findirq(ioaddr);
+ if (dev->irq)
+ break;
+ /* kick the card and try again */
+ smc911x_reset(dev);
+ }
+ }
+ if (dev->irq == 0) {
+ printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n",
+ dev->name);
+ retval = -ENODEV;
+ goto err_out;
+ }
+ dev->irq = irq_canonicalize(dev->irq);
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(dev);
+
+ dev->open = smc911x_open;
+ dev->stop = smc911x_close;
+ dev->hard_start_xmit = smc911x_hard_start_xmit;
+ dev->tx_timeout = smc911x_timeout;
+ dev->watchdog_timeo = msecs_to_jiffies(watchdog);
+ dev->get_stats = smc911x_query_statistics;
+ dev->set_multicast_list = smc911x_set_multicast_list;
+ dev->ethtool_ops = &smc911x_ethtool_ops;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = smc911x_poll_controller;
+#endif
+
+ INIT_WORK(&lp->phy_configure, smc911x_phy_configure, dev);
+ lp->mii.phy_id_mask = 0x1f;
+ lp->mii.reg_num_mask = 0x1f;
+ lp->mii.force_media = 0;
+ lp->mii.full_duplex = 0;
+ lp->mii.dev = dev;
+ lp->mii.mdio_read = smc911x_phy_read;
+ lp->mii.mdio_write = smc911x_phy_write;
+
+ /*
+ * Locate the phy, if any.
+ */
+ smc911x_phy_detect(dev);
+
+ /* Set default parameters */
+ lp->msg_enable = NETIF_MSG_LINK;
+ lp->ctl_rfduplx = 1;
+ lp->ctl_rspeed = 100;
+
+ /* Grab the IRQ */
+ retval = request_irq(dev->irq, &smc911x_interrupt, SA_SHIRQ, dev->name, dev);
+ if (retval)
+ goto err_out;
+
+ set_irq_type(dev->irq, IRQT_FALLING);
+
+#ifdef SMC_USE_DMA
+ lp->rxdma = SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq);
+ lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq);
+ lp->rxdma_active = 0;
+ lp->txdma_active = 0;
+ dev->dma = lp->rxdma;
+#endif
+
+ retval = register_netdev(dev);
+ if (retval == 0) {
+ /* now, print out the card info, in a short format.. */
+ printk("%s: %s (rev %d) at %#lx IRQ %d",
+ dev->name, version_string, lp->revision,
+ dev->base_addr, dev->irq);
+
+#ifdef SMC_USE_DMA
+ if (lp->rxdma != -1)
+ printk(" RXDMA %d ", lp->rxdma);
+
+ if (lp->txdma != -1)
+ printk("TXDMA %d", lp->txdma);
+#endif
+ printk("\n");
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk("%s: Invalid ethernet MAC address. Please "
+ "set using ifconfig\n", dev->name);
+ } else {
+ /* Print the Ethernet address */
+ printk("%s: Ethernet addr: ", dev->name);
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x\n", dev->dev_addr[5]);
+ }
+
+ if (lp->phy_type == 0) {
+ PRINTK("%s: No PHY found\n", dev->name);
+ } else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) {
+ PRINTK("%s: LAN911x Internal PHY\n", dev->name);
+ } else {
+ PRINTK("%s: External PHY 0x%08x\n", dev->name, lp->phy_type);
+ }
+ }
+
+err_out:
+#ifdef SMC_USE_DMA
+ if (retval) {
+ if (lp->rxdma != -1) {
+ SMC_DMA_FREE(dev, lp->rxdma);
+ }
+ if (lp->txdma != -1) {
+ SMC_DMA_FREE(dev, lp->txdma);
+ }
+ }
+#endif
+ return retval;
+}
+
+/*
+ * smc911x_drv_probe(struct platform_device *pdev)
+ *
+ * Output:
+ * 0 --> there is a device
+ * anything else, error
+ */
+static int smc911x_drv_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ struct resource *res;
+ unsigned int *addr;
+ int ret;
+
+ DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /*
+ * Request the regions.
+ */
+ if (!request_mem_region(res->start, SMC911X_IO_EXTENT, CARDNAME)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct smc911x_local));
+ if (!ndev) {
+ printk("%s: could not allocate device.\n", CARDNAME);
+ ret = -ENOMEM;
+ goto release_1;
+ }
+ SET_MODULE_OWNER(ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ndev->dma = (unsigned char)-1;
+ ndev->irq = platform_get_irq(pdev, 0);
+
+ addr = ioremap(res->start, SMC911X_IO_EXTENT);
+ if (!addr) {
+ ret = -ENOMEM;
+ goto release_both;
+ }
+
+ platform_set_drvdata(pdev, ndev);
+ ret = smc911x_probe(ndev, (unsigned long)addr);
+ if (ret != 0) {
+ platform_set_drvdata(pdev, NULL);
+ iounmap(addr);
+release_both:
+ free_netdev(ndev);
+release_1:
+ release_mem_region(res->start, SMC911X_IO_EXTENT);
+out:
+ printk("%s: not found (%d).\n", CARDNAME, ret);
+ }
+#ifdef SMC_USE_DMA
+ else {
+ struct smc911x_local *lp = netdev_priv(ndev);
+ lp->physaddr = res->start;
+ lp->dev = &pdev->dev;
+ }
+#endif
+
+ return ret;
+}
+
+static int smc911x_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+ platform_set_drvdata(pdev, NULL);
+
+ unregister_netdev(ndev);
+
+ free_irq(ndev->irq, ndev);
+
+#ifdef SMC_USE_DMA
+ {
+ struct smc911x_local *lp = netdev_priv(ndev);
+ if (lp->rxdma != -1) {
+ SMC_DMA_FREE(dev, lp->rxdma);
+ }
+ if (lp->txdma != -1) {
+ SMC_DMA_FREE(dev, lp->txdma);
+ }
+ }
+#endif
+ iounmap((void *)ndev->base_addr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, SMC911X_IO_EXTENT);
+
+ free_netdev(ndev);
+ return 0;
+}
+
+static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct net_device *ndev = platform_get_drvdata(dev);
+ unsigned long ioaddr = ndev->base_addr;
+
+ DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+ if (ndev) {
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ smc911x_shutdown(ndev);
+#if POWER_DOWN
+ /* Set D2 - Energy detect only setting */
+ SMC_SET_PMT_CTRL(2<<12);
+#endif
+ }
+ }
+ return 0;
+}
+
+static int smc911x_drv_resume(struct platform_device *dev)
+{
+ struct net_device *ndev = platform_get_drvdata(dev);
+
+ DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+ if (ndev) {
+ struct smc911x_local *lp = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ smc911x_reset(ndev);
+ smc911x_enable(ndev);
+ if (lp->phy_type != 0)
+ smc911x_phy_configure(ndev);
+ netif_device_attach(ndev);
+ }
+ }
+ return 0;
+}
+
+static struct platform_driver smc911x_driver = {
+ .probe = smc911x_drv_probe,
+ .remove = smc911x_drv_remove,
+ .suspend = smc911x_drv_suspend,
+ .resume = smc911x_drv_resume,
+ .driver = {
+ .name = CARDNAME,
+ },
+};
+
+static int __init smc911x_init(void)
+{
+ return platform_driver_register(&smc911x_driver);
+}
+
+static void __exit smc911x_cleanup(void)
+{
+ platform_driver_unregister(&smc911x_driver);
+}
+
+module_init(smc911x_init);
+module_exit(smc911x_cleanup);
diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h
new file mode 100644
index 000000000000..962a710459fc
--- /dev/null
+++ b/drivers/net/smc911x.h
@@ -0,0 +1,835 @@
+/*------------------------------------------------------------------------
+ . smc911x.h - macros for SMSC's LAN911{5,6,7,8} single-chip Ethernet device.
+ .
+ . Copyright (C) 2005 Sensoria Corp.
+ . Derived from the unified SMC91x driver by Nicolas Pitre
+ .
+ . This program is free software; you can redistribute it and/or modify
+ . it under the terms of the GNU General Public License as published by
+ . the Free Software Foundation; either version 2 of the License, or
+ . (at your option) any later version.
+ .
+ . This program is distributed in the hope that it will be useful,
+ . but WITHOUT ANY WARRANTY; without even the implied warranty of
+ . MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ . GNU General Public License for more details.
+ .
+ . You should have received a copy of the GNU General Public License
+ . along with this program; if not, write to the Free Software
+ . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ .
+ . Information contained in this file was obtained from the LAN9118
+ . manual from SMC. To get a copy, if you really want one, you can find
+ . information under www.smsc.com.
+ .
+ . Authors
+ . Dustin McIntire <dustin@sensoria.com>
+ .
+ ---------------------------------------------------------------------------*/
+#ifndef _SMC911X_H_
+#define _SMC911X_H_
+
+/*
+ * Use the DMA feature on PXA chips
+ */
+#ifdef CONFIG_ARCH_PXA
+ #define SMC_USE_PXA_DMA 1
+ #define SMC_USE_16BIT 0
+ #define SMC_USE_32BIT 1
+#endif
+
+
+/*
+ * Define the bus width specific IO macros
+ */
+
+#if SMC_USE_16BIT
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inl(a, r) ((SMC_inw(a, r) & 0xFFFF)+(SMC_inw(a+2, r)<<16))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_outl(v, a, r) \
+ do{ \
+ writel(v & 0xFFFF, (a) + (r)); \
+ writel(v >> 16, (a) + (r) + 2); \
+ } while (0)
+#define SMC_insl(a, r, p, l) readsw((short*)((a) + (r)), p, l*2)
+#define SMC_outsl(a, r, p, l) writesw((short*)((a) + (r)), p, l*2)
+
+#elif SMC_USE_32BIT
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insl(a, r, p, l) readsl((int*)((a) + (r)), p, l)
+#define SMC_outsl(a, r, p, l) writesl((int*)((a) + (r)), p, l)
+
+#endif /* SMC_USE_16BIT */
+
+
+
+#if SMC_USE_PXA_DMA
+#define SMC_USE_DMA
+
+/*
+ * Define the request and free functions
+ * These are unfortunately architecture specific as no generic allocation
+ * mechanism exists
+ */
+#define SMC_DMA_REQUEST(dev, handler) \
+ pxa_request_dma(dev->name, DMA_PRIO_LOW, handler, dev)
+
+#define SMC_DMA_FREE(dev, dma) \
+ pxa_free_dma(dma)
+
+#define SMC_DMA_ACK_IRQ(dev, dma) \
+{ \
+ if (DCSR(dma) & DCSR_BUSERR) { \
+ printk("%s: DMA %d bus error!\n", dev->name, dma); \
+ } \
+ DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; \
+}
+
+/*
+ * Use a DMA for RX and TX packets.
+ */
+#include <linux/dma-mapping.h>
+#include <asm/dma.h>
+#include <asm/arch/pxa-regs.h>
+
+static dma_addr_t rx_dmabuf, tx_dmabuf;
+static int rx_dmalen, tx_dmalen;
+
+#ifdef SMC_insl
+#undef SMC_insl
+#define SMC_insl(a, r, p, l) \
+ smc_pxa_dma_insl(lp->dev, a, lp->physaddr, r, lp->rxdma, p, l)
+
+static inline void
+smc_pxa_dma_insl(struct device *dev, u_long ioaddr, u_long physaddr,
+ int reg, int dma, u_char *buf, int len)
+{
+ /* 64 bit alignment is required for memory to memory DMA */
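+	/* The buffer is assumed to be at least 32-bit aligned already, so
+	 * testing bit 2 is enough: if it is set, one word is read by PIO
+	 * first to bring the pointer up to 64-bit alignment.
+	 */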
+ if ((long)buf & 4) {
+ *((u32 *)buf) = SMC_inl(ioaddr, reg);
+ buf += 4;
+ len--;
+ }
+
+ len *= 4;
+ rx_dmabuf = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
+ rx_dmalen = len;
+ DCSR(dma) = DCSR_NODESC;
+ DTADR(dma) = rx_dmabuf;
+ DSADR(dma) = physaddr + reg;
+ DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
+ DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen));
+ DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+}
+#endif
+
+#ifdef SMC_insw
+#undef SMC_insw
+#define SMC_insw(a, r, p, l) \
+ smc_pxa_dma_insw(lp->dev, a, lp->physaddr, r, lp->rxdma, p, l)
+
+static inline void
+smc_pxa_dma_insw(struct device *dev, u_long ioaddr, u_long physaddr,
+ int reg, int dma, u_char *buf, int len)
+{
+ /* 64 bit alignment is required for memory to memory DMA */
+ while ((long)buf & 6) {
+ *((u16 *)buf) = SMC_inw(ioaddr, reg);
+ buf += 2;
+ len--;
+ }
+
+ len *= 2;
+ rx_dmabuf = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
+ rx_dmalen = len;
+ DCSR(dma) = DCSR_NODESC;
+ DTADR(dma) = rx_dmabuf;
+ DSADR(dma) = physaddr + reg;
+ DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
+ DCMD_WIDTH2 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen));
+ DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+}
+#endif
+
+#ifdef SMC_outsl
+#undef SMC_outsl
+#define SMC_outsl(a, r, p, l) \
+ smc_pxa_dma_outsl(lp->dev, a, lp->physaddr, r, lp->txdma, p, l)
+
+static inline void
+smc_pxa_dma_outsl(struct device *dev, u_long ioaddr, u_long physaddr,
+ int reg, int dma, u_char *buf, int len)
+{
+ /* 64 bit alignment is required for memory to memory DMA */
+ if ((long)buf & 4) {
+ SMC_outl(*((u32 *)buf), ioaddr, reg);
+ buf += 4;
+ len--;
+ }
+
+ len *= 4;
+ tx_dmabuf = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+ tx_dmalen = len;
+ DCSR(dma) = DCSR_NODESC;
+ DSADR(dma) = tx_dmabuf;
+ DTADR(dma) = physaddr + reg;
+ DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 |
+ DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen));
+ DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+}
+#endif
+
+#ifdef SMC_outsw
+#undef SMC_outsw
+#define SMC_outsw(a, r, p, l) \
+ smc_pxa_dma_outsw(lp->dev, a, lp->physaddr, r, lp->txdma, p, l)
+
+static inline void
+smc_pxa_dma_outsw(struct device *dev, u_long ioaddr, u_long physaddr,
+ int reg, int dma, u_char *buf, int len)
+{
+ /* 64 bit alignment is required for memory to memory DMA */
+ while ((long)buf & 6) {
+ SMC_outw(*((u16 *)buf), ioaddr, reg);
+ buf += 2;
+ len--;
+ }
+
+ len *= 2;
+ tx_dmabuf = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+ tx_dmalen = len;
+ DCSR(dma) = DCSR_NODESC;
+ DSADR(dma) = tx_dmabuf;
+ DTADR(dma) = physaddr + reg;
+ DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 |
+ DCMD_WIDTH2 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen));
+ DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+}
+#endif
+
+#endif /* SMC_USE_PXA_DMA */
+
+
+/* Chip Parameters and Register Definitions */
+
+#define SMC911X_TX_FIFO_LOW_THRESHOLD (1536*2)
+
+#define SMC911X_IO_EXTENT 0x100
+
+#define SMC911X_EEPROM_LEN 7
+
+/* Below are the register offsets and bit definitions
+ * of the Lan911x memory space
+ */
+#define RX_DATA_FIFO (0x00)
+
+#define TX_DATA_FIFO (0x20)
+#define TX_CMD_A_INT_ON_COMP_ (0x80000000)
+#define TX_CMD_A_INT_BUF_END_ALGN_ (0x03000000)
+#define TX_CMD_A_INT_4_BYTE_ALGN_ (0x00000000)
+#define TX_CMD_A_INT_16_BYTE_ALGN_ (0x01000000)
+#define TX_CMD_A_INT_32_BYTE_ALGN_ (0x02000000)
+#define TX_CMD_A_INT_DATA_OFFSET_ (0x001F0000)
+#define TX_CMD_A_INT_FIRST_SEG_ (0x00002000)
+#define TX_CMD_A_INT_LAST_SEG_ (0x00001000)
+#define TX_CMD_A_BUF_SIZE_ (0x000007FF)
+#define TX_CMD_B_PKT_TAG_ (0xFFFF0000)
+#define TX_CMD_B_ADD_CRC_DISABLE_ (0x00002000)
+#define TX_CMD_B_DISABLE_PADDING_ (0x00001000)
+#define TX_CMD_B_PKT_BYTE_LENGTH_ (0x000007FF)
+
+#define RX_STATUS_FIFO (0x40)
+#define RX_STS_PKT_LEN_ (0x3FFF0000)
+#define RX_STS_ES_ (0x00008000)
+#define RX_STS_BCST_ (0x00002000)
+#define RX_STS_LEN_ERR_ (0x00001000)
+#define RX_STS_RUNT_ERR_ (0x00000800)
+#define RX_STS_MCAST_ (0x00000400)
+#define RX_STS_TOO_LONG_ (0x00000080)
+#define RX_STS_COLL_ (0x00000040)
+#define RX_STS_ETH_TYPE_ (0x00000020)
+#define RX_STS_WDOG_TMT_ (0x00000010)
+#define RX_STS_MII_ERR_ (0x00000008)
+#define RX_STS_DRIBBLING_ (0x00000004)
+#define RX_STS_CRC_ERR_ (0x00000002)
+#define RX_STATUS_FIFO_PEEK (0x44)
+#define TX_STATUS_FIFO (0x48)
+#define TX_STS_TAG_ (0xFFFF0000)
+#define TX_STS_ES_ (0x00008000)
+#define TX_STS_LOC_ (0x00000800)
+#define TX_STS_NO_CARR_ (0x00000400)
+#define TX_STS_LATE_COLL_ (0x00000200)
+#define TX_STS_MANY_COLL_ (0x00000100)
+#define TX_STS_COLL_CNT_ (0x00000078)
+#define TX_STS_MANY_DEFER_ (0x00000004)
+#define TX_STS_UNDERRUN_ (0x00000002)
+#define TX_STS_DEFERRED_ (0x00000001)
+#define TX_STATUS_FIFO_PEEK (0x4C)
+#define ID_REV (0x50)
+#define ID_REV_CHIP_ID_ (0xFFFF0000) /* RO */
+#define ID_REV_REV_ID_ (0x0000FFFF) /* RO */
+
+#define INT_CFG (0x54)
+#define INT_CFG_INT_DEAS_ (0xFF000000) /* R/W */
+#define INT_CFG_INT_DEAS_CLR_ (0x00004000)
+#define INT_CFG_INT_DEAS_STS_ (0x00002000)
+#define INT_CFG_IRQ_INT_ (0x00001000) /* RO */
+#define INT_CFG_IRQ_EN_ (0x00000100) /* R/W */
+#define INT_CFG_IRQ_POL_ (0x00000010) /* R/W Not Affected by SW Reset */
+#define INT_CFG_IRQ_TYPE_ (0x00000001) /* R/W Not Affected by SW Reset */
+
+#define INT_STS (0x58)
+#define INT_STS_SW_INT_ (0x80000000) /* R/WC */
+#define INT_STS_TXSTOP_INT_ (0x02000000) /* R/WC */
+#define INT_STS_RXSTOP_INT_ (0x01000000) /* R/WC */
+#define INT_STS_RXDFH_INT_ (0x00800000) /* R/WC */
+#define INT_STS_RXDF_INT_ (0x00400000) /* R/WC */
+#define INT_STS_TX_IOC_ (0x00200000) /* R/WC */
+#define INT_STS_RXD_INT_ (0x00100000) /* R/WC */
+#define INT_STS_GPT_INT_ (0x00080000) /* R/WC */
+#define INT_STS_PHY_INT_ (0x00040000) /* RO */
+#define INT_STS_PME_INT_ (0x00020000) /* R/WC */
+#define INT_STS_TXSO_ (0x00010000) /* R/WC */
+#define INT_STS_RWT_ (0x00008000) /* R/WC */
+#define INT_STS_RXE_ (0x00004000) /* R/WC */
+#define INT_STS_TXE_ (0x00002000) /* R/WC */
+//#define INT_STS_ERX_ (0x00001000) /* R/WC */
+#define INT_STS_TDFU_ (0x00000800) /* R/WC */
+#define INT_STS_TDFO_ (0x00000400) /* R/WC */
+#define INT_STS_TDFA_ (0x00000200) /* R/WC */
+#define INT_STS_TSFF_ (0x00000100) /* R/WC */
+#define INT_STS_TSFL_ (0x00000080) /* R/WC */
+//#define INT_STS_RXDF_ (0x00000040) /* R/WC */
+#define INT_STS_RDFO_ (0x00000040) /* R/WC */
+#define INT_STS_RDFL_ (0x00000020) /* R/WC */
+#define INT_STS_RSFF_ (0x00000010) /* R/WC */
+#define INT_STS_RSFL_ (0x00000008) /* R/WC */
+#define INT_STS_GPIO2_INT_ (0x00000004) /* R/WC */
+#define INT_STS_GPIO1_INT_ (0x00000002) /* R/WC */
+#define INT_STS_GPIO0_INT_ (0x00000001) /* R/WC */
+
+#define INT_EN (0x5C)
+#define INT_EN_SW_INT_EN_ (0x80000000) /* R/W */
+#define INT_EN_TXSTOP_INT_EN_ (0x02000000) /* R/W */
+#define INT_EN_RXSTOP_INT_EN_ (0x01000000) /* R/W */
+#define INT_EN_RXDFH_INT_EN_ (0x00800000) /* R/W */
+//#define INT_EN_RXDF_INT_EN_ (0x00400000) /* R/W */
+#define INT_EN_TIOC_INT_EN_ (0x00200000) /* R/W */
+#define INT_EN_RXD_INT_EN_ (0x00100000) /* R/W */
+#define INT_EN_GPT_INT_EN_ (0x00080000) /* R/W */
+#define INT_EN_PHY_INT_EN_ (0x00040000) /* R/W */
+#define INT_EN_PME_INT_EN_ (0x00020000) /* R/W */
+#define INT_EN_TXSO_EN_ (0x00010000) /* R/W */
+#define INT_EN_RWT_EN_ (0x00008000) /* R/W */
+#define INT_EN_RXE_EN_ (0x00004000) /* R/W */
+#define INT_EN_TXE_EN_ (0x00002000) /* R/W */
+//#define INT_EN_ERX_EN_ (0x00001000) /* R/W */
+#define INT_EN_TDFU_EN_ (0x00000800) /* R/W */
+#define INT_EN_TDFO_EN_ (0x00000400) /* R/W */
+#define INT_EN_TDFA_EN_ (0x00000200) /* R/W */
+#define INT_EN_TSFF_EN_ (0x00000100) /* R/W */
+#define INT_EN_TSFL_EN_ (0x00000080) /* R/W */
+//#define INT_EN_RXDF_EN_ (0x00000040) /* R/W */
+#define INT_EN_RDFO_EN_ (0x00000040) /* R/W */
+#define INT_EN_RDFL_EN_ (0x00000020) /* R/W */
+#define INT_EN_RSFF_EN_ (0x00000010) /* R/W */
+#define INT_EN_RSFL_EN_ (0x00000008) /* R/W */
+#define INT_EN_GPIO2_INT_ (0x00000004) /* R/W */
+#define INT_EN_GPIO1_INT_ (0x00000002) /* R/W */
+#define INT_EN_GPIO0_INT_ (0x00000001) /* R/W */
+
+#define BYTE_TEST (0x64)
+#define FIFO_INT (0x68)
+#define FIFO_INT_TX_AVAIL_LEVEL_ (0xFF000000) /* R/W */
+#define FIFO_INT_TX_STS_LEVEL_ (0x00FF0000) /* R/W */
+#define FIFO_INT_RX_AVAIL_LEVEL_ (0x0000FF00) /* R/W */
+#define FIFO_INT_RX_STS_LEVEL_ (0x000000FF) /* R/W */
+
+#define RX_CFG (0x6C)
+#define RX_CFG_RX_END_ALGN_ (0xC0000000) /* R/W */
+#define RX_CFG_RX_END_ALGN4_ (0x00000000) /* R/W */
+#define RX_CFG_RX_END_ALGN16_ (0x40000000) /* R/W */
+#define RX_CFG_RX_END_ALGN32_ (0x80000000) /* R/W */
+#define RX_CFG_RX_DMA_CNT_ (0x0FFF0000) /* R/W */
+#define RX_CFG_RX_DUMP_ (0x00008000) /* R/W */
+#define RX_CFG_RXDOFF_ (0x00001F00) /* R/W */
+//#define RX_CFG_RXBAD_ (0x00000001) /* R/W */
+
+#define TX_CFG (0x70)
+//#define TX_CFG_TX_DMA_LVL_ (0xE0000000) /* R/W */
+//#define TX_CFG_TX_DMA_CNT_ (0x0FFF0000) /* R/W Self Clearing */
+#define TX_CFG_TXS_DUMP_ (0x00008000) /* Self Clearing */
+#define TX_CFG_TXD_DUMP_ (0x00004000) /* Self Clearing */
+#define TX_CFG_TXSAO_ (0x00000004) /* R/W */
+#define TX_CFG_TX_ON_ (0x00000002) /* R/W */
+#define TX_CFG_STOP_TX_ (0x00000001) /* Self Clearing */
+
+#define HW_CFG (0x74)
+#define HW_CFG_TTM_ (0x00200000) /* R/W */
+#define HW_CFG_SF_ (0x00100000) /* R/W */
+#define HW_CFG_TX_FIF_SZ_ (0x000F0000) /* R/W */
+#define HW_CFG_TR_ (0x00003000) /* R/W */
+#define HW_CFG_PHY_CLK_SEL_ (0x00000060) /* R/W */
+#define HW_CFG_PHY_CLK_SEL_INT_PHY_ (0x00000000) /* R/W */
+#define HW_CFG_PHY_CLK_SEL_EXT_PHY_ (0x00000020) /* R/W */
+#define HW_CFG_PHY_CLK_SEL_CLK_DIS_ (0x00000040) /* R/W */
+#define HW_CFG_SMI_SEL_ (0x00000010) /* R/W */
+#define HW_CFG_EXT_PHY_DET_ (0x00000008) /* RO */
+#define HW_CFG_EXT_PHY_EN_ (0x00000004) /* R/W */
+#define HW_CFG_32_16_BIT_MODE_ (0x00000004) /* RO */
+#define HW_CFG_SRST_TO_ (0x00000002) /* RO */
+#define HW_CFG_SRST_ (0x00000001) /* Self Clearing */
+
+#define RX_DP_CTRL (0x78)
+#define RX_DP_CTRL_RX_FFWD_ (0x80000000) /* R/W */
+#define RX_DP_CTRL_FFWD_BUSY_ (0x80000000) /* RO */
+
+#define RX_FIFO_INF (0x7C)
+#define RX_FIFO_INF_RXSUSED_ (0x00FF0000) /* RO */
+#define RX_FIFO_INF_RXDUSED_ (0x0000FFFF) /* RO */
+
+#define TX_FIFO_INF (0x80)
+#define TX_FIFO_INF_TSUSED_ (0x00FF0000) /* RO */
+#define TX_FIFO_INF_TDFREE_ (0x0000FFFF) /* RO */
+
+#define PMT_CTRL (0x84)
+#define PMT_CTRL_PM_MODE_ (0x00003000) /* Self Clearing */
+#define PMT_CTRL_PHY_RST_ (0x00000400) /* Self Clearing */
+#define PMT_CTRL_WOL_EN_ (0x00000200) /* R/W */
+#define PMT_CTRL_ED_EN_ (0x00000100) /* R/W */
+#define PMT_CTRL_PME_TYPE_ (0x00000040) /* R/W Not Affected by SW Reset */
+#define PMT_CTRL_WUPS_ (0x00000030) /* R/WC */
+#define PMT_CTRL_WUPS_NOWAKE_ (0x00000000) /* R/WC */
+#define PMT_CTRL_WUPS_ED_ (0x00000010) /* R/WC */
+#define PMT_CTRL_WUPS_WOL_ (0x00000020) /* R/WC */
+#define PMT_CTRL_WUPS_MULTI_ (0x00000030) /* R/WC */
+#define PMT_CTRL_PME_IND_ (0x00000008) /* R/W */
+#define PMT_CTRL_PME_POL_ (0x00000004) /* R/W */
+#define PMT_CTRL_PME_EN_ (0x00000002) /* R/W Not Affected by SW Reset */
+#define PMT_CTRL_READY_ (0x00000001) /* RO */
+
+#define GPIO_CFG (0x88)
+#define GPIO_CFG_LED3_EN_ (0x40000000) /* R/W */
+#define GPIO_CFG_LED2_EN_ (0x20000000) /* R/W */
+#define GPIO_CFG_LED1_EN_ (0x10000000) /* R/W */
+#define GPIO_CFG_GPIO2_INT_POL_ (0x04000000) /* R/W */
+#define GPIO_CFG_GPIO1_INT_POL_ (0x02000000) /* R/W */
+#define GPIO_CFG_GPIO0_INT_POL_ (0x01000000) /* R/W */
+#define GPIO_CFG_EEPR_EN_ (0x00700000) /* R/W */
+#define GPIO_CFG_GPIOBUF2_ (0x00040000) /* R/W */
+#define GPIO_CFG_GPIOBUF1_ (0x00020000) /* R/W */
+#define GPIO_CFG_GPIOBUF0_ (0x00010000) /* R/W */
+#define GPIO_CFG_GPIODIR2_ (0x00000400) /* R/W */
+#define GPIO_CFG_GPIODIR1_ (0x00000200) /* R/W */
+#define GPIO_CFG_GPIODIR0_ (0x00000100) /* R/W */
+#define GPIO_CFG_GPIOD4_ (0x00000010) /* R/W */
+#define GPIO_CFG_GPIOD3_ (0x00000008) /* R/W */
+#define GPIO_CFG_GPIOD2_ (0x00000004) /* R/W */
+#define GPIO_CFG_GPIOD1_ (0x00000002) /* R/W */
+#define GPIO_CFG_GPIOD0_ (0x00000001) /* R/W */
+
+#define GPT_CFG (0x8C)
+#define GPT_CFG_TIMER_EN_ (0x20000000) /* R/W */
+#define GPT_CFG_GPT_LOAD_ (0x0000FFFF) /* R/W */
+
+#define GPT_CNT (0x90)
+#define GPT_CNT_GPT_CNT_ (0x0000FFFF) /* RO */
+
+#define ENDIAN (0x98)
+#define FREE_RUN (0x9C)
+#define RX_DROP (0xA0)
+#define MAC_CSR_CMD (0xA4)
+#define MAC_CSR_CMD_CSR_BUSY_ (0x80000000) /* Self Clearing */
+#define MAC_CSR_CMD_R_NOT_W_ (0x40000000) /* R/W */
+#define MAC_CSR_CMD_CSR_ADDR_ (0x000000FF) /* R/W */
+
+#define MAC_CSR_DATA (0xA8)
+#define AFC_CFG (0xAC)
+#define AFC_CFG_AFC_HI_ (0x00FF0000) /* R/W */
+#define AFC_CFG_AFC_LO_ (0x0000FF00) /* R/W */
+#define AFC_CFG_BACK_DUR_ (0x000000F0) /* R/W */
+#define AFC_CFG_FCMULT_ (0x00000008) /* R/W */
+#define AFC_CFG_FCBRD_ (0x00000004) /* R/W */
+#define AFC_CFG_FCADD_ (0x00000002) /* R/W */
+#define AFC_CFG_FCANY_ (0x00000001) /* R/W */
+
+#define E2P_CMD (0xB0)
+#define E2P_CMD_EPC_BUSY_ (0x80000000) /* Self Clearing */
+#define E2P_CMD_EPC_CMD_ (0x70000000) /* R/W */
+#define E2P_CMD_EPC_CMD_READ_ (0x00000000) /* R/W */
+#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000) /* R/W */
+#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) /* R/W */
+#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) /* R/W */
+#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000) /* R/W */
+#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000) /* R/W */
+#define E2P_CMD_EPC_CMD_ERAL_ (0x60000000) /* R/W */
+#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000) /* R/W */
+#define E2P_CMD_EPC_TIMEOUT_ (0x00000200) /* RO */
+#define E2P_CMD_MAC_ADDR_LOADED_ (0x00000100) /* RO */
+#define E2P_CMD_EPC_ADDR_ (0x000000FF) /* R/W */
+
+#define E2P_DATA (0xB4)
+#define E2P_DATA_EEPROM_DATA_ (0x000000FF) /* R/W */
+/* end of LAN register offsets and bit definitions */
+
+/*
+ ****************************************************************************
+ ****************************************************************************
+ * MAC Control and Status Register (Indirect Address)
+ * Offset (through the MAC_CSR CMD and DATA port)
+ ****************************************************************************
+ ****************************************************************************
+ *
+ */
+#define MAC_CR (0x01) /* R/W */
+
+/* MAC_CR - MAC Control Register */
+#define MAC_CR_RXALL_ (0x80000000)
+// TODO: delete this bit? It is not described in the data sheet.
+#define MAC_CR_HBDIS_ (0x10000000)
+#define MAC_CR_RCVOWN_ (0x00800000)
+#define MAC_CR_LOOPBK_ (0x00200000)
+#define MAC_CR_FDPX_ (0x00100000)
+#define MAC_CR_MCPAS_ (0x00080000)
+#define MAC_CR_PRMS_ (0x00040000)
+#define MAC_CR_INVFILT_ (0x00020000)
+#define MAC_CR_PASSBAD_ (0x00010000)
+#define MAC_CR_HFILT_ (0x00008000)
+#define MAC_CR_HPFILT_ (0x00002000)
+#define MAC_CR_LCOLL_ (0x00001000)
+#define MAC_CR_BCAST_ (0x00000800)
+#define MAC_CR_DISRTY_ (0x00000400)
+#define MAC_CR_PADSTR_ (0x00000100)
+#define MAC_CR_BOLMT_MASK_ (0x000000C0)
+#define MAC_CR_DFCHK_ (0x00000020)
+#define MAC_CR_TXEN_ (0x00000008)
+#define MAC_CR_RXEN_ (0x00000004)
+
+#define ADDRH (0x02) /* R/W mask 0x0000FFFFUL */
+#define ADDRL (0x03) /* R/W mask 0xFFFFFFFFUL */
+#define HASHH (0x04) /* R/W */
+#define HASHL (0x05) /* R/W */
+
+#define MII_ACC (0x06) /* R/W */
+#define MII_ACC_PHY_ADDR_ (0x0000F800)
+#define MII_ACC_MIIRINDA_ (0x000007C0)
+#define MII_ACC_MII_WRITE_ (0x00000002)
+#define MII_ACC_MII_BUSY_ (0x00000001)
+
+#define MII_DATA (0x07) /* R/W mask 0x0000FFFFUL */
+
+#define FLOW (0x08) /* R/W */
+#define FLOW_FCPT_ (0xFFFF0000)
+#define FLOW_FCPASS_ (0x00000004)
+#define FLOW_FCEN_ (0x00000002)
+#define FLOW_FCBSY_ (0x00000001)
+
+#define VLAN1 (0x09) /* R/W mask 0x0000FFFFUL */
+#define VLAN1_VTI1_ (0x0000ffff)
+
+#define VLAN2 (0x0A) /* R/W mask 0x0000FFFFUL */
+#define VLAN2_VTI2_ (0x0000ffff)
+
+#define WUFF (0x0B) /* WO */
+
+#define WUCSR (0x0C) /* R/W */
+#define WUCSR_GUE_ (0x00000200)
+#define WUCSR_WUFR_ (0x00000040)
+#define WUCSR_MPR_ (0x00000020)
+#define WUCSR_WAKE_EN_ (0x00000004)
+#define WUCSR_MPEN_ (0x00000002)
+
+/*
+ ****************************************************************************
+ * Chip Specific MII Defines
+ ****************************************************************************
+ *
+ * Phy register offsets and bit definitions
+ *
+ */
+
+#define PHY_MODE_CTRL_STS ((u32)17) /* Mode Control/Status Register */
+//#define MODE_CTRL_STS_FASTRIP_ ((u16)0x4000)
+#define MODE_CTRL_STS_EDPWRDOWN_ ((u16)0x2000)
+//#define MODE_CTRL_STS_LOWSQEN_ ((u16)0x0800)
+//#define MODE_CTRL_STS_MDPREBP_ ((u16)0x0400)
+//#define MODE_CTRL_STS_FARLOOPBACK_ ((u16)0x0200)
+//#define MODE_CTRL_STS_FASTEST_ ((u16)0x0100)
+//#define MODE_CTRL_STS_REFCLKEN_ ((u16)0x0010)
+//#define MODE_CTRL_STS_PHYADBP_ ((u16)0x0008)
+//#define MODE_CTRL_STS_FORCE_G_LINK_ ((u16)0x0004)
+#define MODE_CTRL_STS_ENERGYON_ ((u16)0x0002)
+
+#define PHY_INT_SRC ((u32)29)
+#define PHY_INT_SRC_ENERGY_ON_ ((u16)0x0080)
+#define PHY_INT_SRC_ANEG_COMP_ ((u16)0x0040)
+#define PHY_INT_SRC_REMOTE_FAULT_ ((u16)0x0020)
+#define PHY_INT_SRC_LINK_DOWN_ ((u16)0x0010)
+#define PHY_INT_SRC_ANEG_LP_ACK_ ((u16)0x0008)
+#define PHY_INT_SRC_PAR_DET_FAULT_ ((u16)0x0004)
+#define PHY_INT_SRC_ANEG_PGRX_ ((u16)0x0002)
+
+#define PHY_INT_MASK ((u32)30)
+#define PHY_INT_MASK_ENERGY_ON_ ((u16)0x0080)
+#define PHY_INT_MASK_ANEG_COMP_ ((u16)0x0040)
+#define PHY_INT_MASK_REMOTE_FAULT_ ((u16)0x0020)
+#define PHY_INT_MASK_LINK_DOWN_ ((u16)0x0010)
+#define PHY_INT_MASK_ANEG_LP_ACK_ ((u16)0x0008)
+#define PHY_INT_MASK_PAR_DET_FAULT_ ((u16)0x0004)
+#define PHY_INT_MASK_ANEG_PGRX_ ((u16)0x0002)
+
+#define PHY_SPECIAL ((u32)31)
+#define PHY_SPECIAL_ANEG_DONE_ ((u16)0x1000)
+#define PHY_SPECIAL_RES_ ((u16)0x0040)
+#define PHY_SPECIAL_RES_MASK_ ((u16)0x0FE1)
+#define PHY_SPECIAL_SPD_ ((u16)0x001C)
+#define PHY_SPECIAL_SPD_10HALF_ ((u16)0x0004)
+#define PHY_SPECIAL_SPD_10FULL_ ((u16)0x0014)
+#define PHY_SPECIAL_SPD_100HALF_ ((u16)0x0008)
+#define PHY_SPECIAL_SPD_100FULL_ ((u16)0x0018)
+
+#define LAN911X_INTERNAL_PHY_ID (0x0007C000)
+
+/* Chip ID values */
+#define CHIP_9115 0x115
+#define CHIP_9116 0x116
+#define CHIP_9117 0x117
+#define CHIP_9118 0x118
+
+struct chip_id {
+ u16 id;
+ char *name;
+};
+
+static const struct chip_id chip_ids[] = {
+ { CHIP_9115, "LAN9115" },
+ { CHIP_9116, "LAN9116" },
+ { CHIP_9117, "LAN9117" },
+ { CHIP_9118, "LAN9118" },
+ { 0, NULL },
+};
+
+#define IS_REV_A(x)		(((x) & 0xFFFF) == 0)
+
+/*
+ * Macros to abstract register access according to the data bus
+ * capabilities. Please use those and not the in/out primitives.
+ */
+/* FIFO read/write macros */
+#define SMC_PUSH_DATA(p, l) SMC_outsl( ioaddr, TX_DATA_FIFO, p, (l) >> 2 )
+#define SMC_PULL_DATA(p, l) SMC_insl ( ioaddr, RX_DATA_FIFO, p, (l) >> 2 )
+#define SMC_SET_TX_FIFO(x) SMC_outl( x, ioaddr, TX_DATA_FIFO )
+#define SMC_GET_RX_FIFO() SMC_inl( ioaddr, RX_DATA_FIFO )
+
+
+/* I/O mapped register read/write macros */
+#define SMC_GET_TX_STS_FIFO() SMC_inl( ioaddr, TX_STATUS_FIFO )
+#define SMC_GET_RX_STS_FIFO() SMC_inl( ioaddr, RX_STATUS_FIFO )
+#define SMC_GET_RX_STS_FIFO_PEEK() SMC_inl( ioaddr, RX_STATUS_FIFO_PEEK )
+#define SMC_GET_PN() (SMC_inl( ioaddr, ID_REV ) >> 16)
+#define SMC_GET_REV() (SMC_inl( ioaddr, ID_REV ) & 0xFFFF)
+#define SMC_GET_IRQ_CFG() SMC_inl( ioaddr, INT_CFG )
+#define SMC_SET_IRQ_CFG(x) SMC_outl( x, ioaddr, INT_CFG )
+#define SMC_GET_INT() SMC_inl( ioaddr, INT_STS )
+#define SMC_ACK_INT(x) SMC_outl( x, ioaddr, INT_STS )
+#define SMC_GET_INT_EN() SMC_inl( ioaddr, INT_EN )
+#define SMC_SET_INT_EN(x) SMC_outl( x, ioaddr, INT_EN )
+#define SMC_GET_BYTE_TEST() SMC_inl( ioaddr, BYTE_TEST )
+#define SMC_SET_BYTE_TEST(x) SMC_outl( x, ioaddr, BYTE_TEST )
+#define SMC_GET_FIFO_INT() SMC_inl( ioaddr, FIFO_INT )
+#define SMC_SET_FIFO_INT(x) SMC_outl( x, ioaddr, FIFO_INT )
+#define SMC_SET_FIFO_TDA(x) \
+ do { \
+ unsigned long __flags; \
+ int __mask; \
+ local_irq_save(__flags); \
+ __mask = SMC_GET_FIFO_INT() & ~(0xFF<<24); \
+ SMC_SET_FIFO_INT( __mask | (x)<<24 ); \
+ local_irq_restore(__flags); \
+ } while (0)
+#define SMC_SET_FIFO_TSL(x) \
+ do { \
+ unsigned long __flags; \
+ int __mask; \
+ local_irq_save(__flags); \
+ __mask = SMC_GET_FIFO_INT() & ~(0xFF<<16); \
+ SMC_SET_FIFO_INT( __mask | (((x) & 0xFF)<<16)); \
+ local_irq_restore(__flags); \
+ } while (0)
+#define SMC_SET_FIFO_RSA(x) \
+ do { \
+ unsigned long __flags; \
+ int __mask; \
+ local_irq_save(__flags); \
+ __mask = SMC_GET_FIFO_INT() & ~(0xFF<<8); \
+ SMC_SET_FIFO_INT( __mask | (((x) & 0xFF)<<8)); \
+ local_irq_restore(__flags); \
+ } while (0)
+#define SMC_SET_FIFO_RSL(x) \
+ do { \
+ unsigned long __flags; \
+ int __mask; \
+ local_irq_save(__flags); \
+ __mask = SMC_GET_FIFO_INT() & ~0xFF; \
+ SMC_SET_FIFO_INT( __mask | ((x) & 0xFF)); \
+ local_irq_restore(__flags); \
+ } while (0)
+#define SMC_GET_RX_CFG() SMC_inl( ioaddr, RX_CFG )
+#define SMC_SET_RX_CFG(x) SMC_outl( x, ioaddr, RX_CFG )
+#define SMC_GET_TX_CFG() SMC_inl( ioaddr, TX_CFG )
+#define SMC_SET_TX_CFG(x) SMC_outl( x, ioaddr, TX_CFG )
+#define SMC_GET_HW_CFG() SMC_inl( ioaddr, HW_CFG )
+#define SMC_SET_HW_CFG(x) SMC_outl( x, ioaddr, HW_CFG )
+#define SMC_GET_RX_DP_CTRL() SMC_inl( ioaddr, RX_DP_CTRL )
+#define SMC_SET_RX_DP_CTRL(x) SMC_outl( x, ioaddr, RX_DP_CTRL )
+#define SMC_GET_PMT_CTRL() SMC_inl( ioaddr, PMT_CTRL )
+#define SMC_SET_PMT_CTRL(x) SMC_outl( x, ioaddr, PMT_CTRL )
+#define SMC_GET_GPIO_CFG() SMC_inl( ioaddr, GPIO_CFG )
+#define SMC_SET_GPIO_CFG(x) SMC_outl( x, ioaddr, GPIO_CFG )
+#define SMC_GET_RX_FIFO_INF() SMC_inl( ioaddr, RX_FIFO_INF )
+#define SMC_SET_RX_FIFO_INF(x) SMC_outl( x, ioaddr, RX_FIFO_INF )
+#define SMC_GET_TX_FIFO_INF() SMC_inl( ioaddr, TX_FIFO_INF )
+#define SMC_SET_TX_FIFO_INF(x) SMC_outl( x, ioaddr, TX_FIFO_INF )
+#define SMC_GET_GPT_CFG() SMC_inl( ioaddr, GPT_CFG )
+#define SMC_SET_GPT_CFG(x) SMC_outl( x, ioaddr, GPT_CFG )
+#define SMC_GET_RX_DROP() SMC_inl( ioaddr, RX_DROP )
+#define SMC_SET_RX_DROP(x) SMC_outl( x, ioaddr, RX_DROP )
+#define SMC_GET_MAC_CMD() SMC_inl( ioaddr, MAC_CSR_CMD )
+#define SMC_SET_MAC_CMD(x) SMC_outl( x, ioaddr, MAC_CSR_CMD )
+#define SMC_GET_MAC_DATA() SMC_inl( ioaddr, MAC_CSR_DATA )
+#define SMC_SET_MAC_DATA(x) SMC_outl( x, ioaddr, MAC_CSR_DATA )
+#define SMC_GET_AFC_CFG() SMC_inl( ioaddr, AFC_CFG )
+#define SMC_SET_AFC_CFG(x) SMC_outl( x, ioaddr, AFC_CFG )
+#define SMC_GET_E2P_CMD() SMC_inl( ioaddr, E2P_CMD )
+#define SMC_SET_E2P_CMD(x) SMC_outl( x, ioaddr, E2P_CMD )
+#define SMC_GET_E2P_DATA() SMC_inl( ioaddr, E2P_DATA )
+#define SMC_SET_E2P_DATA(x) SMC_outl( x, ioaddr, E2P_DATA )
+
+/* MAC register read/write macros */
+#define SMC_GET_MAC_CSR(a,v) \
+ do { \
+ while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \
+ SMC_SET_MAC_CMD(MAC_CSR_CMD_CSR_BUSY_ | \
+ MAC_CSR_CMD_R_NOT_W_ | (a) ); \
+ while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \
+ v = SMC_GET_MAC_DATA(); \
+ } while (0)
+#define SMC_SET_MAC_CSR(a,v) \
+ do { \
+ while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \
+ SMC_SET_MAC_DATA(v); \
+ SMC_SET_MAC_CMD(MAC_CSR_CMD_CSR_BUSY_ | (a) ); \
+ while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \
+ } while (0)
+#define SMC_GET_MAC_CR(x) SMC_GET_MAC_CSR( MAC_CR, x )
+#define SMC_SET_MAC_CR(x) SMC_SET_MAC_CSR( MAC_CR, x )
+#define SMC_GET_ADDRH(x) SMC_GET_MAC_CSR( ADDRH, x )
+#define SMC_SET_ADDRH(x) SMC_SET_MAC_CSR( ADDRH, x )
+#define SMC_GET_ADDRL(x) SMC_GET_MAC_CSR( ADDRL, x )
+#define SMC_SET_ADDRL(x) SMC_SET_MAC_CSR( ADDRL, x )
+#define SMC_GET_HASHH(x) SMC_GET_MAC_CSR( HASHH, x )
+#define SMC_SET_HASHH(x) SMC_SET_MAC_CSR( HASHH, x )
+#define SMC_GET_HASHL(x) SMC_GET_MAC_CSR( HASHL, x )
+#define SMC_SET_HASHL(x) SMC_SET_MAC_CSR( HASHL, x )
+#define SMC_GET_MII_ACC(x) SMC_GET_MAC_CSR( MII_ACC, x )
+#define SMC_SET_MII_ACC(x) SMC_SET_MAC_CSR( MII_ACC, x )
+#define SMC_GET_MII_DATA(x) SMC_GET_MAC_CSR( MII_DATA, x )
+#define SMC_SET_MII_DATA(x) SMC_SET_MAC_CSR( MII_DATA, x )
+#define SMC_GET_FLOW(x) SMC_GET_MAC_CSR( FLOW, x )
+#define SMC_SET_FLOW(x) SMC_SET_MAC_CSR( FLOW, x )
+#define SMC_GET_VLAN1(x) SMC_GET_MAC_CSR( VLAN1, x )
+#define SMC_SET_VLAN1(x) SMC_SET_MAC_CSR( VLAN1, x )
+#define SMC_GET_VLAN2(x) SMC_GET_MAC_CSR( VLAN2, x )
+#define SMC_SET_VLAN2(x) SMC_SET_MAC_CSR( VLAN2, x )
+#define SMC_SET_WUFF(x) SMC_SET_MAC_CSR( WUFF, x )
+#define SMC_GET_WUCSR(x) SMC_GET_MAC_CSR( WUCSR, x )
+#define SMC_SET_WUCSR(x) SMC_SET_MAC_CSR( WUCSR, x )
+
+/* PHY register read/write macros */
+#define SMC_GET_MII(a,phy,v) \
+ do { \
+ u32 __v; \
+ do { \
+ SMC_GET_MII_ACC(__v); \
+ } while ( __v & MII_ACC_MII_BUSY_ ); \
+ SMC_SET_MII_ACC( ((phy)<<11) | ((a)<<6) | \
+ MII_ACC_MII_BUSY_); \
+ do { \
+ SMC_GET_MII_ACC(__v); \
+ } while ( __v & MII_ACC_MII_BUSY_ ); \
+ SMC_GET_MII_DATA(v); \
+ } while (0)
+#define SMC_SET_MII(a,phy,v) \
+ do { \
+ u32 __v; \
+ do { \
+ SMC_GET_MII_ACC(__v); \
+ } while ( __v & MII_ACC_MII_BUSY_ ); \
+ SMC_SET_MII_DATA(v); \
+ SMC_SET_MII_ACC( ((phy)<<11) | ((a)<<6) | \
+ MII_ACC_MII_BUSY_ | \
+ MII_ACC_MII_WRITE_ ); \
+ do { \
+ SMC_GET_MII_ACC(__v); \
+ } while ( __v & MII_ACC_MII_BUSY_ ); \
+ } while (0)
+#define SMC_GET_PHY_BMCR(phy,x) SMC_GET_MII( MII_BMCR, phy, x )
+#define SMC_SET_PHY_BMCR(phy,x) SMC_SET_MII( MII_BMCR, phy, x )
+#define SMC_GET_PHY_BMSR(phy,x) SMC_GET_MII( MII_BMSR, phy, x )
+#define SMC_GET_PHY_ID1(phy,x) SMC_GET_MII( MII_PHYSID1, phy, x )
+#define SMC_GET_PHY_ID2(phy,x) SMC_GET_MII( MII_PHYSID2, phy, x )
+#define SMC_GET_PHY_MII_ADV(phy,x) SMC_GET_MII( MII_ADVERTISE, phy, x )
+#define SMC_SET_PHY_MII_ADV(phy,x) SMC_SET_MII( MII_ADVERTISE, phy, x )
+#define SMC_GET_PHY_MII_LPA(phy,x) SMC_GET_MII( MII_LPA, phy, x )
+#define SMC_SET_PHY_MII_LPA(phy,x) SMC_SET_MII( MII_LPA, phy, x )
+#define SMC_GET_PHY_CTRL_STS(phy,x) SMC_GET_MII( PHY_MODE_CTRL_STS, phy, x )
+#define SMC_SET_PHY_CTRL_STS(phy,x) SMC_SET_MII( PHY_MODE_CTRL_STS, phy, x )
+#define SMC_GET_PHY_INT_SRC(phy,x) SMC_GET_MII( PHY_INT_SRC, phy, x )
+#define SMC_SET_PHY_INT_SRC(phy,x) SMC_SET_MII( PHY_INT_SRC, phy, x )
+#define SMC_GET_PHY_INT_MASK(phy,x) SMC_GET_MII( PHY_INT_MASK, phy, x )
+#define SMC_SET_PHY_INT_MASK(phy,x) SMC_SET_MII( PHY_INT_MASK, phy, x )
+#define SMC_GET_PHY_SPECIAL(phy,x) SMC_GET_MII( PHY_SPECIAL, phy, x )
+
+
+
+/* Misc read/write macros */
+
+#ifndef SMC_GET_MAC_ADDR
+#define SMC_GET_MAC_ADDR(addr) \
+ do { \
+ unsigned int __v; \
+ \
+ SMC_GET_MAC_CSR(ADDRL, __v); \
+ addr[0] = __v; addr[1] = __v >> 8; \
+ addr[2] = __v >> 16; addr[3] = __v >> 24; \
+ SMC_GET_MAC_CSR(ADDRH, __v); \
+ addr[4] = __v; addr[5] = __v >> 8; \
+ } while (0)
+#endif
+
+#define SMC_SET_MAC_ADDR(addr) \
+ do { \
+ SMC_SET_MAC_CSR(ADDRL, \
+ addr[0] | \
+ (addr[1] << 8) | \
+ (addr[2] << 16) | \
+ (addr[3] << 24)); \
+ SMC_SET_MAC_CSR(ADDRH, addr[4]|(addr[5] << 8));\
+ } while (0)
+
+
+#define SMC_WRITE_EEPROM_CMD(cmd, addr)					\
+	do {								\
+		while (SMC_GET_E2P_CMD() & E2P_CMD_EPC_BUSY_);		\
+		SMC_SET_E2P_CMD(E2P_CMD_EPC_BUSY_ | (cmd) | (addr));	\
+		while (SMC_GET_E2P_CMD() & E2P_CMD_EPC_BUSY_);		\
+	} while (0)
+
+#endif /* _SMC911X_H_ */
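The MAC_CSR_CMD/MAC_CSR_DATA pair defined in this header gives indirect access to the MAC block: the SMC_GET_MAC_CSR()/SMC_SET_MAC_CSR() macros poll the busy bit, issue the command (with R_NOT_W set for reads), poll again, then move the value through the data port. Below is a minimal C sketch of the read sequence written as a plain function; the smc_reg_read()/smc_reg_write() helpers and the ioaddr parameter are illustrative stand-ins for SMC_inl()/SMC_outl() and are not part of this patch.

#include <linux/types.h>
#include <linux/io.h>

/* Illustrative raw accessors standing in for SMC_inl()/SMC_outl(). */
static inline u32 smc_reg_read(void __iomem *ioaddr, int reg)
{
	return readl(ioaddr + reg);
}

static inline void smc_reg_write(void __iomem *ioaddr, int reg, u32 val)
{
	writel(val, ioaddr + reg);
}

/* Read one indirect MAC CSR (e.g. MAC_CR), mirroring SMC_GET_MAC_CSR():
 * wait for the command port to go idle, request a read of register
 * 'csr', wait for completion, then fetch the result from the data port. */
static u32 smc_mac_csr_read(void __iomem *ioaddr, u8 csr)
{
	while (smc_reg_read(ioaddr, MAC_CSR_CMD) & MAC_CSR_CMD_CSR_BUSY_)
		;
	smc_reg_write(ioaddr, MAC_CSR_CMD,
		      MAC_CSR_CMD_CSR_BUSY_ | MAC_CSR_CMD_R_NOT_W_ | csr);
	while (smc_reg_read(ioaddr, MAC_CSR_CMD) & MAC_CSR_CMD_CSR_BUSY_)
		;
	return smc_reg_read(ioaddr, MAC_CSR_DATA);
}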
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index f86697da04d6..6cf16f322ad5 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -732,12 +732,9 @@ static int ifport;
struct net_device * __init smc_init(int unit)
{
struct net_device *dev = alloc_etherdev(sizeof(struct smc_local));
- static struct devlist *smcdev = smc_devlist;
+ struct devlist *smcdev = smc_devlist;
int err = 0;
-#ifndef NO_AUTOPROBE
- smcdev = smc_devlist;
-#endif
if (!dev)
return ERR_PTR(-ENODEV);
@@ -1607,7 +1604,7 @@ MODULE_PARM_DESC(io, "SMC 99194 I/O base address");
MODULE_PARM_DESC(irq, "SMC 99194 IRQ number");
MODULE_PARM_DESC(ifport, "SMC 99194 interface port (0-default, 1-TP, 2-AUI)");
-int init_module(void)
+int __init init_module(void)
{
if (io == 0)
printk(KERN_WARNING
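The __init tag added to init_module() above places the function in the kernel's init text section, which is discarded once initialization finishes (after boot for built-in drivers, after load for modules), so probe-time code costs no memory afterwards. A small, hedged sketch of the same pattern with purely illustrative names (example_init, example_io_table):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>

/* Probe-time-only data can be tagged __initdata and is freed together
 * with the __init text. */
static int example_io_table[] __initdata = { 0x300, 0x320, 0x340 };

static int __init example_init(void)
{
	/* Runs exactly once while the driver initializes. */
	return example_io_table[0] ? 0 : -ENODEV;
}
module_init(example_init);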
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index e1be1af51201..f72a4f57905a 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -129,6 +129,24 @@
#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
+#elif defined(CONFIG_MACH_LOGICPD_PXA270)
+
+#define SMC_CAN_USE_8BIT 0
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+#define SMC_IO_SHIFT 0
+#define SMC_NOWAIT 1
+#define SMC_USE_PXA_DMA 1
+
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+
#elif defined(CONFIG_ARCH_INNOKOM) || \
defined(CONFIG_MACH_MAINSTONE) || \
defined(CONFIG_ARCH_PXA_IDP) || \
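The new CONFIG_MACH_LOGICPD_PXA270 block above limits the bus to 16-bit accesses (SMC_CAN_USE_32BIT is 0), so bulk FIFO transfers go through the readsw()/writesw() string accessors wrapped by SMC_insw()/SMC_outsw(). A rough sketch of such a 16-bit FIFO push in open-coded form (smc_push_buf and its arguments are illustrative, not taken from this patch):

#include <linux/types.h>
#include <linux/io.h>

/* Copy 'len' bytes from 'buf' into the chip's data register with 16-bit
 * string writes, as SMC_outsw() does; the caller is assumed to pass a
 * word-aligned length, matching the driver's convention. */
static void smc_push_buf(void __iomem *ioaddr, unsigned int data_reg,
			 const void *buf, int len)
{
	writesw(ioaddr + data_reg, buf, len >> 1);
}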
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index b2ddd5e79303..9282b4b0c022 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -345,9 +345,9 @@ static int bcm5421_enable_fiber(struct mii_phy* phy)
static int bcm5461_enable_fiber(struct mii_phy* phy)
{
- phy_write(phy, MII_NCONFIG, 0xfc0c);
- phy_write(phy, MII_BMCR, 0x4140);
- phy_write(phy, MII_NCONFIG, 0xfc0b);
+ phy_write(phy, MII_NCONFIG, 0xfc0c);
+ phy_write(phy, MII_BMCR, 0x4140);
+ phy_write(phy, MII_NCONFIG, 0xfc0b);
phy_write(phy, MII_BMCR, 0x0140);
return 0;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 49ad60b72657..862c226dbbe2 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.58"
-#define DRV_MODULE_RELDATE "May 22, 2006"
+#define DRV_MODULE_VERSION "3.59"
+#define DRV_MODULE_RELDATE "June 8, 2006"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -4485,9 +4485,8 @@ static void tg3_disable_nvram_access(struct tg3 *tp)
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
- if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
- tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
- NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
+ tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
+ NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
switch (kind) {
@@ -4568,13 +4567,12 @@ static int tg3_chip_reset(struct tg3 *tp)
void (*write_op)(struct tg3 *, u32, u32);
int i;
- if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
- tg3_nvram_lock(tp);
- /* No matching tg3_nvram_unlock() after this because
- * chip reset below will undo the nvram lock.
- */
- tp->nvram_lock_cnt = 0;
- }
+ tg3_nvram_lock(tp);
+
+ /* No matching tg3_nvram_unlock() after this because
+ * chip reset below will undo the nvram lock.
+ */
+ tp->nvram_lock_cnt = 0;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
@@ -4727,20 +4725,25 @@ static int tg3_chip_reset(struct tg3 *tp)
tw32_f(MAC_MODE, 0);
udelay(40);
- if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
- /* Wait for firmware initialization to complete. */
- for (i = 0; i < 100000; i++) {
- tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
- if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
- break;
- udelay(10);
- }
- if (i >= 100000) {
- printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
- "firmware will not restart magic=%08x\n",
- tp->dev->name, val);
- return -ENODEV;
- }
+ /* Wait for firmware initialization to complete. */
+ for (i = 0; i < 100000; i++) {
+ tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
+ if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+ break;
+ udelay(10);
+ }
+
+	/* Chip might not be fitted with firmware. Some Sun onboard
+ * parts are configured like that. So don't signal the timeout
+ * of the above loop as an error, but do report the lack of
+ * running firmware once.
+ */
+ if (i >= 100000 &&
+ !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
+ tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
+
+ printk(KERN_INFO PFX "%s: No firmware running.\n",
+ tp->dev->name);
}
if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
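The rewritten reset path above no longer treats a firmware-handshake timeout as fatal; instead it sets TG3_FLG2_NO_FWARE_REPORTED so the missing-firmware note is printed only once per device. A stripped-down sketch of that report-once idiom (the example_dev structure and its fields are illustrative only):

#include <linux/types.h>
#include <linux/kernel.h>

#define EXAMPLE_FLAG_NO_FW_REPORTED	0x40000000

struct example_dev {
	u32 flags;
	const char *name;
};

/* Warn about absent firmware at most once, even though the reset path
 * that detects the timeout may run many times over the device lifetime. */
static void example_report_no_firmware(struct example_dev *dev, int timed_out)
{
	if (timed_out && !(dev->flags & EXAMPLE_FLAG_NO_FW_REPORTED)) {
		dev->flags |= EXAMPLE_FLAG_NO_FW_REPORTED;
		printk(KERN_INFO "%s: No firmware running.\n", dev->name);
	}
}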
@@ -9075,9 +9078,6 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
{
int j;
- if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
- return;
-
tw32_f(GRC_EEPROM_ADDR,
(EEPROM_ADDR_FSM_RESET |
(EEPROM_DEFAULT_CLOCK_PERIOD <<
@@ -9210,11 +9210,6 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
int ret;
- if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
- printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
- return -EINVAL;
- }
-
if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
return tg3_nvram_read_using_eeprom(tp, offset, val);
@@ -9447,11 +9442,6 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
int ret;
- if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
- printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
- return -EINVAL;
- }
-
if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
~GRC_LCLCTRL_GPIO_OUTPUT1);
@@ -9578,15 +9568,19 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
tp->misc_host_ctrl);
+ /* The memory arbiter has to be enabled in order for SRAM accesses
+ * to succeed. Normally on powerup the tg3 chip firmware will make
+ * sure it is enabled, but other entities such as system netboot
+ * code might disable it.
+ */
+ val = tr32(MEMARB_MODE);
+ tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
+
tp->phy_id = PHY_ID_INVALID;
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
- /* Do not even try poking around in here on Sun parts. */
- if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
- /* All SUN chips are built-in LOMs. */
- tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
- return;
- }
+ /* Assume an onboard device by default. */
+ tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
if (val == NIC_SRAM_DATA_SIG_MAGIC) {
@@ -9686,6 +9680,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
+ else
+ tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
@@ -9834,16 +9830,8 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
int i;
u32 magic;
- if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
- /* Sun decided not to put the necessary bits in the
- * NVRAM of their onboard tg3 parts :(
- */
- strcpy(tp->board_part_number, "Sun 570X");
- return;
- }
-
if (tg3_nvram_read_swab(tp, 0x0, &magic))
- return;
+ goto out_not_found;
if (magic == TG3_EEPROM_MAGIC) {
for (i = 0; i < 256; i += 4) {
@@ -9874,6 +9862,9 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
break;
msleep(1);
}
+ if (!(tmp16 & 0x8000))
+ goto out_not_found;
+
pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
&tmp);
tmp = cpu_to_le32(tmp);
@@ -9965,37 +9956,6 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
}
}
-#ifdef CONFIG_SPARC64
-static int __devinit tg3_is_sun_570X(struct tg3 *tp)
-{
- struct pci_dev *pdev = tp->pdev;
- struct pcidev_cookie *pcp = pdev->sysdata;
-
- if (pcp != NULL) {
- int node = pcp->prom_node;
- u32 venid;
- int err;
-
- err = prom_getproperty(node, "subsystem-vendor-id",
- (char *) &venid, sizeof(venid));
- if (err == 0 || err == -1)
- return 0;
- if (venid == PCI_VENDOR_ID_SUN)
- return 1;
-
- /* TG3 chips onboard the SunBlade-2500 don't have the
- * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
- * are distinguishable from non-Sun variants by being
- * named "network" by the firmware. Non-Sun cards will
- * show up as being named "ethernet".
- */
- if (!strcmp(pcp->prom_name, "network"))
- return 1;
- }
- return 0;
-}
-#endif
-
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
static struct pci_device_id write_reorder_chipsets[] = {
@@ -10012,11 +9972,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
u16 pci_cmd;
int err;
-#ifdef CONFIG_SPARC64
- if (tg3_is_sun_570X(tp))
- tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
-#endif
-
/* Force memory write invalidate off. If we leave it on,
* then on 5700_BX chips we have to enable a workaround.
* The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
@@ -10312,8 +10267,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tp->write32 == tg3_write_indirect_reg32 ||
((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) ||
- (tp->tg3_flags2 & TG3_FLG2_SUN_570X))
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
/* Get eeprom hw config before calling tg3_set_power_state().
@@ -10594,8 +10548,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
#endif
mac_offset = 0x7c;
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
- !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
mac_offset = 0xcc;
@@ -10622,8 +10575,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
}
if (!addr_ok) {
/* Next, try NVRAM. */
- if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
- !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
+ if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
!tg3_nvram_read(tp, mac_offset + 4, &lo)) {
dev->dev_addr[0] = ((hi >> 16) & 0xff);
dev->dev_addr[1] = ((hi >> 24) & 0xff);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 0e29b885d449..ff0faab94bd5 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2184,7 +2184,7 @@ struct tg3 {
#define TG3_FLAG_INIT_COMPLETE 0x80000000
u32 tg3_flags2;
#define TG3_FLG2_RESTART_TIMER 0x00000001
-#define TG3_FLG2_SUN_570X 0x00000002
+/* 0x00000002 available */
#define TG3_FLG2_NO_ETH_WIRE_SPEED 0x00000004
#define TG3_FLG2_IS_5788 0x00000008
#define TG3_FLG2_MAX_RXPEND_64 0x00000010
@@ -2216,6 +2216,7 @@ struct tg3 {
#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
#define TG3_FLG2_1SHOT_MSI 0x10000000
#define TG3_FLG2_PHY_JITTER_BUG 0x20000000
+#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000
u32 split_mode_max_reqs;
#define SPLIT_MODE_5704_MAX_REQ 3
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index e3dd144d326b..5f743b972949 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -227,12 +227,12 @@ enum {
SROMC0InfoLeaf = 27,
MediaBlockMask = 0x3f,
MediaCustomCSRs = (1 << 6),
-
+
/* PCIPM bits */
PM_Sleep = (1 << 31),
PM_Snooze = (1 << 30),
PM_Mask = PM_Sleep | PM_Snooze,
-
+
/* SIAStatus bits */
NWayState = (1 << 14) | (1 << 13) | (1 << 12),
NWayRestart = (1 << 12),
@@ -858,7 +858,7 @@ static void de_stop_rxtx (struct de_private *de)
return;
cpu_relax();
}
-
+
printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);
}
@@ -931,7 +931,7 @@ static void de_set_media (struct de_private *de)
macmode |= FullDuplex;
else
macmode &= ~FullDuplex;
-
+
if (netif_msg_link(de)) {
printk(KERN_INFO "%s: set link %s\n"
KERN_INFO "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
@@ -966,9 +966,9 @@ static void de21040_media_timer (unsigned long data)
u32 status = dr32(SIAStatus);
unsigned int carrier;
unsigned long flags;
-
+
carrier = (status & NetCxnErr) ? 0 : 1;
-
+
if (carrier) {
if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
goto no_link_yet;
@@ -985,7 +985,7 @@ static void de21040_media_timer (unsigned long data)
return;
}
- de_link_down(de);
+ de_link_down(de);
if (de->media_lock)
return;
@@ -1039,7 +1039,7 @@ static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
return 0;
break;
}
-
+
return 1;
}
@@ -1050,9 +1050,9 @@ static void de21041_media_timer (unsigned long data)
u32 status = dr32(SIAStatus);
unsigned int carrier;
unsigned long flags;
-
+
carrier = (status & NetCxnErr) ? 0 : 1;
-
+
if (carrier) {
if ((de->media_type == DE_MEDIA_TP_AUTO ||
de->media_type == DE_MEDIA_TP ||
@@ -1072,7 +1072,7 @@ static void de21041_media_timer (unsigned long data)
return;
}
- de_link_down(de);
+ de_link_down(de);
/* if media type locked, don't switch media */
if (de->media_lock)
@@ -1124,7 +1124,7 @@ static void de21041_media_timer (unsigned long data)
u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
de_next_media(de, next_states, ARRAY_SIZE(next_states));
}
-
+
set_media:
spin_lock_irqsave(&de->lock, flags);
de_stop_rxtx(de);
@@ -1148,7 +1148,7 @@ static void de_media_interrupt (struct de_private *de, u32 status)
mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
return;
}
-
+
BUG_ON(!(status & LinkFail));
if (netif_carrier_ok(de->dev)) {
@@ -1227,7 +1227,7 @@ static int de_init_hw (struct de_private *de)
int rc;
de_adapter_wake(de);
-
+
macmode = dr32(MacMode) & ~MacModeClear;
rc = de_reset_mac(de);
@@ -1413,7 +1413,7 @@ static int de_close (struct net_device *dev)
netif_stop_queue(dev);
netif_carrier_off(dev);
spin_unlock_irqrestore(&de->lock, flags);
-
+
free_irq(dev->irq, dev);
de_free_rings(de);
@@ -1441,7 +1441,7 @@ static void de_tx_timeout (struct net_device *dev)
spin_unlock_irq(&de->lock);
enable_irq(dev->irq);
-
+
/* Update the error counts. */
__de_get_stats(de);
@@ -1451,7 +1451,7 @@ static void de_tx_timeout (struct net_device *dev)
de_init_rings(de);
de_init_hw(de);
-
+
netif_wake_queue(dev);
}
@@ -1459,7 +1459,7 @@ static void __de_get_regs(struct de_private *de, u8 *buf)
{
int i;
u32 *rbuf = (u32 *)buf;
-
+
/* read all CSRs */
for (i = 0; i < DE_NUM_REGS; i++)
rbuf[i] = dr32(i * 8);
@@ -1474,7 +1474,7 @@ static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
ecmd->transceiver = XCVR_INTERNAL;
ecmd->phy_address = 0;
ecmd->advertising = de->media_advertise;
-
+
switch (de->media_type) {
case DE_MEDIA_AUI:
ecmd->port = PORT_AUI;
@@ -1489,7 +1489,7 @@ static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
ecmd->speed = SPEED_10;
break;
}
-
+
if (dr32(MacMode) & FullDuplex)
ecmd->duplex = DUPLEX_FULL;
else
@@ -1529,7 +1529,7 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
if (ecmd->autoneg == AUTONEG_ENABLE &&
(!(ecmd->advertising & ADVERTISED_Autoneg)))
return -EINVAL;
-
+
switch (ecmd->port) {
case PORT_AUI:
new_media = DE_MEDIA_AUI;
@@ -1554,22 +1554,22 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
return -EINVAL;
break;
}
-
+
media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
-
+
if ((new_media == de->media_type) &&
(media_lock == de->media_lock) &&
(ecmd->advertising == de->media_advertise))
return 0; /* nothing to change */
-
+
de_link_down(de);
de_stop_rxtx(de);
-
+
de->media_type = new_media;
de->media_lock = media_lock;
de->media_advertise = ecmd->advertising;
de_set_media(de);
-
+
return 0;
}
@@ -1817,7 +1817,7 @@ static void __init de21041_get_srom_info (struct de_private *de)
case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
default: de->media_type = DE_MEDIA_TP_AUTO; break;
}
-
+
if (netif_msg_probe(de))
printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n",
de->board_idx, ofs,
@@ -1886,7 +1886,7 @@ static void __init de21041_get_srom_info (struct de_private *de)
de->media[idx].csr13,
de->media[idx].csr14,
de->media[idx].csr15);
-
+
} else if (netif_msg_probe(de))
printk("\n");
@@ -2118,7 +2118,7 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state)
spin_unlock_irq(&de->lock);
enable_irq(dev->irq);
-
+
/* Update the error counts. */
__de_get_stats(de);
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index f56094102042..da8bd0d62a3f 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -41,11 +41,11 @@
Digital Semiconductor SROM Specification. The driver currently
recognises the following chips:
- DC21040 (no SROM)
- DC21041[A]
- DC21140[A]
- DC21142
- DC21143
+ DC21040 (no SROM)
+ DC21041[A]
+ DC21140[A]
+ DC21142
+ DC21143
So far the driver is known to work with the following cards:
@@ -55,7 +55,7 @@
SMC8432
SMC9332 (w/new SROM)
ZNYX31[45]
- ZNYX346 10/100 4 port (can act as a 10/100 bridge!)
+ ZNYX346 10/100 4 port (can act as a 10/100 bridge!)
The driver has been tested on a relatively busy network using the DE425,
DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred
@@ -106,7 +106,7 @@
loading by:
insmod de4x5 io=0xghh where g = bus number
- hh = device number
+ hh = device number
NB: autoprobing for modules is now supported by default. You may just
use:
@@ -120,11 +120,11 @@
4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
kernel with the de4x5 configuration turned off and reboot.
5) insmod de4x5 [io=0xghh]
- 6) run the net startup bits for your new eth?? interface(s) manually
- (usually /etc/rc.inet[12] at boot time).
+ 6) run the net startup bits for your new eth?? interface(s) manually
+ (usually /etc/rc.inet[12] at boot time).
7) enjoy!
- To unload a module, turn off the associated interface(s)
+ To unload a module, turn off the associated interface(s)
'ifconfig eth?? down' then 'rmmod de4x5'.
Automedia detection is included so that in principal you can disconnect
@@ -135,7 +135,7 @@
By default, the driver will now autodetect any DECchip based card.
Should you have a need to restrict the driver to DIGITAL only cards, you
can compile with a DEC_ONLY define, or if loading as a module, use the
- 'dec_only=1' parameter.
+ 'dec_only=1' parameter.
I've changed the timing routines to use the kernel timer and scheduling
functions so that the hangs and other assorted problems that occurred
@@ -204,7 +204,7 @@
following parameters are allowed:
fdx for full duplex
- autosense to set the media/speed; with the following
+ autosense to set the media/speed; with the following
sub-parameters:
TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO
@@ -235,14 +235,14 @@
this automatically or include #define DE4X5_FORCE_EISA on or before
line 1040 in the driver.
- TO DO:
+ TO DO:
------
Revision History
----------------
Version Date Description
-
+
0.1 17-Nov-94 Initial writing. ALPHA code release.
0.2 13-Jan-95 Added PCI support for DE435's.
0.21 19-Jan-95 Added auto media detection.
@@ -251,7 +251,7 @@
Add request/release_region code.
Add loadable modules support for PCI.
Clean up loadable modules support.
- 0.23 28-Feb-95 Added DC21041 and DC21140 support.
+ 0.23 28-Feb-95 Added DC21041 and DC21140 support.
Fix missed frame counter value and initialisation.
Fixed EISA probe.
0.24 11-Apr-95 Change delay routine to use <linux/udelay>.
@@ -280,7 +280,7 @@
Add kernel timer code (h/w is too flaky).
Add MII based PHY autosense.
Add new multicasting code.
- Add new autosense algorithms for media/mode
+ Add new autosense algorithms for media/mode
selection using kernel scheduling/timing.
Re-formatted.
Made changes suggested by <jeff@router.patch.net>:
@@ -307,10 +307,10 @@
Add Accton to the list of broken cards.
Fix TX under-run bug for non DC21140 chips.
Fix boot command probe bug in alloc_device() as
- reported by <koen.gadeyne@barco.com> and
+ reported by <koen.gadeyne@barco.com> and
<orava@nether.tky.hut.fi>.
Add cache locks to prevent a race condition as
- reported by <csd@microplex.com> and
+ reported by <csd@microplex.com> and
<baba@beckman.uiuc.edu>.
Upgraded alloc_device() code.
0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion
@@ -322,7 +322,7 @@
with a loopback packet.
0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported
by <bhat@mundook.cs.mu.OZ.AU>
- 0.45 8-Dec-96 Include endian functions for PPC use, from work
+ 0.45 8-Dec-96 Include endian functions for PPC use, from work
by <cort@cs.nmt.edu> and <g.thomas@opengroup.org>.
0.451 28-Dec-96 Added fix to allow autoprobe for modules after
suggestion from <mjacob@feral.com>.
@@ -346,14 +346,14 @@
<paubert@iram.es>.
0.52 26-Apr-97 Some changes may not credit the right people -
a disk crash meant I lost some mail.
- Change RX interrupt routine to drop rather than
- defer packets to avoid hang reported by
+ Change RX interrupt routine to drop rather than
+ defer packets to avoid hang reported by
<g.thomas@opengroup.org>.
Fix srom_exec() to return for COMPACT and type 1
infoblocks.
Added DC21142 and DC21143 functions.
Added byte counters from <phil@tazenda.demon.co.uk>
- Added SA_INTERRUPT temporary fix from
+ Added SA_INTERRUPT temporary fix from
<mjacob@feral.com>.
0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during
module load: bug reported by
@@ -363,10 +363,10 @@
Make above search independent of BIOS device scan
direction.
Completed DC2114[23] autosense functions.
- 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by
+ 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by
<robin@intercore.com
Fix type1_infoblock() bug introduced in 0.53, from
- problem reports by
+ problem reports by
<parmee@postecss.ncrfran.france.ncr.com> and
<jo@ice.dillingen.baynet.de>.
Added argument list to set up each board from either
@@ -374,7 +374,7 @@
Added generic MII PHY functionality to deal with
newer PHY chips.
Fix the mess in 2.1.67.
- 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by
+ 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by
<redhat@cococo.net>.
Fix bug in pci_probe() for 64 bit systems reported
by <belliott@accessone.com>.
@@ -398,7 +398,7 @@
version. I hope nothing is broken...
Add TX done interrupt modification from suggestion
by <Austin.Donnelly@cl.cam.ac.uk>.
- Fix is_anc_capable() bug reported by
+ Fix is_anc_capable() bug reported by
<Austin.Donnelly@cl.cam.ac.uk>.
Fix type[13]_infoblock() bug: during MII search, PHY
lp->rst not run because lp->ibn not initialised -
@@ -413,7 +413,7 @@
Add an_exception() for old ZYNX346 and fix compile
warning on PPC & SPARC, from <ecd@skynet.be>.
Fix lastPCI to correctly work with compiled in
- kernels and modules from bug report by
+ kernels and modules from bug report by
<Zlatko.Calusic@CARNet.hr> et al.
0.542 15-Sep-98 Fix dc2114x_autoconf() to stop multiple messages
when media is unconnected.
@@ -425,7 +425,7 @@
0.544 8-May-99 Fix for buggy SROM in Motorola embedded boards using
a 21143 by <mmporter@home.com>.
Change PCI/EISA bus probing order.
- 0.545 28-Nov-99 Further Moto SROM bug fix from
+ 0.545 28-Nov-99 Further Moto SROM bug fix from
<mporter@eng.mcd.mot.com>
Remove double checking for DEBUG_RX in de4x5_dbg_rx()
from report by <geert@linux-m68k.org>
@@ -434,8 +434,8 @@
variable 'pb', on a non de4x5 PCI device, in this
case a PCI bridge (DEC chip 21152). The value of
'pb' is now only initialized if a de4x5 chip is
- present.
- <france@handhelds.org>
+ present.
+ <france@handhelds.org>
0.547 08-Nov-01 Use library crc32 functions by <Matt_Domsch@dell.com>
0.548 30-Aug-03 Big 2.6 cleanup. Ported to PCI/EISA probing and
generic DMA APIs. Fixed DE425 support on Alpha.
@@ -584,7 +584,7 @@ static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
/*
** Allow per adapter set up. For modules this is simply a command line
-** parameter, e.g.:
+** parameter, e.g.:
** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
**
** For a compiled in driver, place e.g.
@@ -655,7 +655,7 @@ static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
** Memory Alignment. Each descriptor is 4 longwords long. To force a
** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
** DESC_ALIGN. ALIGN aligns the start address of the private memory area
-** and hence the RX descriptor ring's first entry.
+** and hence the RX descriptor ring's first entry.
*/
#define DE4X5_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
#define DE4X5_ALIGN8 ((u_long)8 - 1) /* 2 longword align */
@@ -1081,8 +1081,8 @@ static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
mdelay(2); /* Wait for 2ms */\
}
-
-static int __devinit
+
+static int __devinit
de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
{
char name[DE4X5_NAME_LENGTH + 1];
@@ -1102,12 +1102,12 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
mdelay(10);
RESET_DE4X5;
-
+
if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
return -ENXIO; /* Hardware could not reset */
}
-
- /*
+
+ /*
** Now find out what kind of DC21040/DC21041/DC21140 board we have.
*/
lp->useSROM = FALSE;
@@ -1116,21 +1116,21 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
} else {
EISA_signature(name, gendev);
}
-
+
if (*name == '\0') { /* Not found a board signature */
return -ENXIO;
}
-
+
dev->base_addr = iobase;
printk ("%s: %s at 0x%04lx", gendev->bus_id, name, iobase);
-
+
printk(", h/w address ");
status = get_hw_addr(dev);
for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet addr. */
printk("%2.2x:", dev->dev_addr[i]);
}
printk("%2.2x,\n", dev->dev_addr[i]);
-
+
if (status != 0) {
printk(" which has an Ethernet PROM CRC error.\n");
return -ENXIO;
@@ -1171,10 +1171,10 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
}
lp->tx_ring = lp->rx_ring + NUM_RX_DESC;
-
+
/*
** Set up the RX descriptor ring (Intels)
- ** Allocate contiguous receive buffers, long word aligned (Alphas)
+ ** Allocate contiguous receive buffers, long word aligned (Alphas)
*/
#if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY)
for (i=0; i<NUM_RX_DESC; i++) {
@@ -1210,7 +1210,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
lp->rxRingSize = NUM_RX_DESC;
lp->txRingSize = NUM_TX_DESC;
-
+
/* Write the end of list marker to the descriptor lists */
lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
@@ -1219,7 +1219,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
outl(lp->dma_rings, DE4X5_RRBA);
outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
DE4X5_TRBA);
-
+
/* Initialise the IRQ mask and Enable/Disable */
lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
lp->irq_en = IMR_NIM | IMR_AIM;
@@ -1252,7 +1252,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
mii_get_phy(dev);
}
-
+
#ifndef __sparc_v9__
printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
#else
@@ -1260,11 +1260,11 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
#endif
((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
}
-
+
if (de4x5_debug & DEBUG_VERSION) {
printk(version);
}
-
+
/* The DE4X5-specific entries in the device structure. */
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, gendev);
@@ -1274,23 +1274,23 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
dev->get_stats = &de4x5_get_stats;
dev->set_multicast_list = &set_multicast_list;
dev->do_ioctl = &de4x5_ioctl;
-
+
dev->mem_start = 0;
-
+
/* Fill in the generic fields of the device structure. */
if ((status = register_netdev (dev))) {
dma_free_coherent (gendev, lp->dma_size,
lp->rx_ring, lp->dma_rings);
return status;
}
-
+
/* Let the adapter sleep to save power */
yawn(dev, SLEEP);
-
+
return status;
}
-
+
static int
de4x5_open(struct net_device *dev)
{
@@ -1312,15 +1312,15 @@ de4x5_open(struct net_device *dev)
*/
yawn(dev, WAKEUP);
- /*
- ** Re-initialize the DE4X5...
+ /*
+ ** Re-initialize the DE4X5...
*/
status = de4x5_init(dev);
spin_lock_init(&lp->lock);
lp->state = OPEN;
de4x5_dbg_open(dev);
-
- if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ,
+
+ if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ,
lp->adapter_name, dev)) {
printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
if (request_irq(dev->irq, de4x5_interrupt, SA_INTERRUPT | SA_SHIRQ,
@@ -1340,11 +1340,11 @@ de4x5_open(struct net_device *dev)
lp->interrupt = UNMASK_INTERRUPTS;
dev->trans_start = jiffies;
-
+
START_DE4X5;
-
+
de4x5_setup_intr(dev);
-
+
if (de4x5_debug & DEBUG_OPEN) {
printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
@@ -1355,7 +1355,7 @@ de4x5_open(struct net_device *dev)
printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
}
-
+
return status;
}
@@ -1369,15 +1369,15 @@ de4x5_open(struct net_device *dev)
*/
static int
de4x5_init(struct net_device *dev)
-{
+{
/* Lock out other processes whilst setting up the hardware */
netif_stop_queue(dev);
-
+
de4x5_sw_reset(dev);
-
+
/* Autoconfigure the connected port */
autoconf_media(dev);
-
+
return 0;
}
@@ -1388,7 +1388,7 @@ de4x5_sw_reset(struct net_device *dev)
u_long iobase = dev->base_addr;
int i, j, status = 0;
s32 bmr, omr;
-
+
/* Select the MII or SRL port now and RESET the MAC */
if (!lp->useSROM) {
if (lp->phy[lp->active].id != 0) {
@@ -1399,7 +1399,7 @@ de4x5_sw_reset(struct net_device *dev)
de4x5_switch_mac_port(dev);
}
- /*
+ /*
** Set the programmable burst length to 8 longwords for all the DC21140
** Fasternet chips and 4 longwords for all others: DMA errors result
** without these values. Cache align 16 long.
@@ -1416,23 +1416,23 @@ de4x5_sw_reset(struct net_device *dev)
outl(lp->dma_rings, DE4X5_RRBA);
outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
DE4X5_TRBA);
-
+
lp->rx_new = lp->rx_old = 0;
lp->tx_new = lp->tx_old = 0;
-
+
for (i = 0; i < lp->rxRingSize; i++) {
lp->rx_ring[i].status = cpu_to_le32(R_OWN);
}
-
+
for (i = 0; i < lp->txRingSize; i++) {
lp->tx_ring[i].status = cpu_to_le32(0);
}
-
+
barrier();
/* Build the setup frame depending on filtering mode */
SetMulticastFilter(dev);
-
+
load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
outl(omr|OMR_ST, DE4X5_OMR);
@@ -1445,18 +1445,18 @@ de4x5_sw_reset(struct net_device *dev)
outl(omr, DE4X5_OMR); /* Stop everything! */
if (j == 0) {
- printk("%s: Setup frame timed out, status %08x\n", dev->name,
+ printk("%s: Setup frame timed out, status %08x\n", dev->name,
inl(DE4X5_STS));
status = -EIO;
}
-
+
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
lp->tx_old = lp->tx_new;
return status;
}
-/*
+/*
** Writes a socket buffer address to the next available transmit descriptor.
*/
static int
@@ -1469,9 +1469,9 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
if (lp->tx_enable == NO) { /* Cannot send for now */
- return -1;
+ return -1;
}
-
+
/*
** Clean out the TX ring asynchronously to interrupts - sometimes the
** interrupts are lost by delayed descriptor status updates relative to
@@ -1482,7 +1482,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags);
/* Test if cache is already locked - requeue skb if so */
- if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
+ if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
return -1;
/* Transmit descriptor ring full or stale skb */
@@ -1509,10 +1509,10 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
lp->stats.tx_bytes += skb->len;
outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
-
+
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
dev->trans_start = jiffies;
-
+
if (TX_BUFFS_AVAIL) {
netif_start_queue(dev); /* Another pkt may be queued */
}
@@ -1521,15 +1521,15 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
}
if (skb) de4x5_putb_cache(dev, skb);
}
-
+
lp->cache.lock = 0;
return status;
}
/*
-** The DE4X5 interrupt handler.
-**
+** The DE4X5 interrupt handler.
+**
** I/O Read/Writes through intermediate PCI bridges are never 'posted',
** so that the asserted interrupt always has some real data to work with -
** if these I/O accesses are ever changed to memory accesses, ensure the
@@ -1546,7 +1546,7 @@ de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs)
s32 imr, omr, sts, limit;
u_long iobase;
unsigned int handled = 0;
-
+
if (dev == NULL) {
printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
return IRQ_NONE;
@@ -1554,35 +1554,35 @@ de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs)
lp = netdev_priv(dev);
spin_lock(&lp->lock);
iobase = dev->base_addr;
-
+
DISABLE_IRQs; /* Ensure non re-entrancy */
if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
printk("%s: Re-entering the interrupt handler.\n", dev->name);
synchronize_irq(dev->irq);
-
+
for (limit=0; limit<8; limit++) {
sts = inl(DE4X5_STS); /* Read IRQ status */
outl(sts, DE4X5_STS); /* Reset the board interrupts */
-
+
if (!(sts & lp->irq_mask)) break;/* All done */
handled = 1;
-
+
if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */
de4x5_rx(dev);
-
+
if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */
- de4x5_tx(dev);
-
+ de4x5_tx(dev);
+
if (sts & STS_LNF) { /* TP Link has failed */
lp->irq_mask &= ~IMR_LFM;
}
-
+
if (sts & STS_UNF) { /* Transmit underrun */
de4x5_txur(dev);
}
-
+
if (sts & STS_SE) { /* Bus Error */
STOP_DE4X5;
printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
@@ -1603,7 +1603,7 @@ de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs)
lp->interrupt = UNMASK_INTERRUPTS;
ENABLE_IRQs;
spin_unlock(&lp->lock);
-
+
return IRQ_RETVAL(handled);
}
@@ -1614,11 +1614,11 @@ de4x5_rx(struct net_device *dev)
u_long iobase = dev->base_addr;
int entry;
s32 status;
-
+
for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
entry=lp->rx_new) {
status = (s32)le32_to_cpu(lp->rx_ring[entry].status);
-
+
if (lp->rx_ovf) {
if (inl(DE4X5_MFC) & MFC_FOCM) {
de4x5_rx_ovfc(dev);
@@ -1629,7 +1629,7 @@ de4x5_rx(struct net_device *dev)
if (status & RD_FS) { /* Remember the start of frame */
lp->rx_old = entry;
}
-
+
if (status & RD_LS) { /* Valid frame status */
if (lp->tx_enable) lp->linkOK++;
if (status & RD_ES) { /* There was an error. */
@@ -1646,9 +1646,9 @@ de4x5_rx(struct net_device *dev)
struct sk_buff *skb;
short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
>> 16) - 4;
-
+
if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
- printk("%s: Insufficient memory; nuking packet.\n",
+ printk("%s: Insufficient memory; nuking packet.\n",
dev->name);
lp->stats.rx_dropped++;
} else {
@@ -1658,14 +1658,14 @@ de4x5_rx(struct net_device *dev)
skb->protocol=eth_type_trans(skb,dev);
de4x5_local_stats(dev, skb->data, pkt_len);
netif_rx(skb);
-
+
/* Update stats */
dev->last_rx = jiffies;
lp->stats.rx_packets++;
lp->stats.rx_bytes += pkt_len;
}
}
-
+
/* Change buffer ownership for this frame, back to the adapter */
for (;lp->rx_old!=entry;lp->rx_old=(++lp->rx_old)%lp->rxRingSize) {
lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
@@ -1674,13 +1674,13 @@ de4x5_rx(struct net_device *dev)
lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
barrier();
}
-
+
/*
** Update entry information
*/
lp->rx_new = (++lp->rx_new) % lp->rxRingSize;
}
-
+
return 0;
}
@@ -1705,20 +1705,20 @@ de4x5_tx(struct net_device *dev)
u_long iobase = dev->base_addr;
int entry;
s32 status;
-
+
for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
if (status < 0) { /* Buffer not sent yet */
break;
} else if (status != 0x7fffffff) { /* Not setup frame */
if (status & TD_ES) { /* An error happened */
- lp->stats.tx_errors++;
+ lp->stats.tx_errors++;
if (status & TD_NC) lp->stats.tx_carrier_errors++;
if (status & TD_LC) lp->stats.tx_window_errors++;
if (status & TD_UF) lp->stats.tx_fifo_errors++;
if (status & TD_EC) lp->pktStats.excessive_collisions++;
if (status & TD_DE) lp->stats.tx_aborted_errors++;
-
+
if (TX_PKT_PENDING) {
outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */
}
@@ -1727,14 +1727,14 @@ de4x5_tx(struct net_device *dev)
if (lp->tx_enable) lp->linkOK++;
}
/* Update the collision counter */
- lp->stats.collisions += ((status & TD_EC) ? 16 :
+ lp->stats.collisions += ((status & TD_EC) ? 16 :
((status & TD_CC) >> 3));
/* Free the buffer. */
if (lp->tx_skb[entry] != NULL)
de4x5_free_tx_buff(lp, entry);
}
-
+
/* Update all the pointers */
lp->tx_old = (++lp->tx_old) % lp->txRingSize;
}
@@ -1746,7 +1746,7 @@ de4x5_tx(struct net_device *dev)
else
netif_start_queue(dev);
}
-
+
return 0;
}
@@ -1755,9 +1755,9 @@ de4x5_ast(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
int next_tick = DE4X5_AUTOSENSE_MS;
-
+
disable_ast(dev);
-
+
if (lp->useSROM) {
next_tick = srom_autoconf(dev);
} else if (lp->chipset == DC21140) {
@@ -1769,7 +1769,7 @@ de4x5_ast(struct net_device *dev)
}
lp->linkOK = 0;
enable_ast(dev, next_tick);
-
+
return 0;
}
@@ -1792,11 +1792,11 @@ de4x5_txur(struct net_device *dev)
}
outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
}
-
+
return 0;
}
-static int
+static int
de4x5_rx_ovfc(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -1813,7 +1813,7 @@ de4x5_rx_ovfc(struct net_device *dev)
}
outl(omr, DE4X5_OMR);
-
+
return 0;
}
@@ -1823,22 +1823,22 @@ de4x5_close(struct net_device *dev)
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
s32 imr, omr;
-
+
disable_ast(dev);
netif_stop_queue(dev);
-
+
if (de4x5_debug & DEBUG_CLOSE) {
printk("%s: Shutting down ethercard, status was %8.8x.\n",
dev->name, inl(DE4X5_STS));
}
-
- /*
+
+ /*
** We stop the DE4X5 here... mask interrupts and stop TX & RX
*/
DISABLE_IRQs;
STOP_DE4X5;
-
+
/* Free the associated irq */
free_irq(dev->irq, dev);
lp->state = CLOSED;
@@ -1846,10 +1846,10 @@ de4x5_close(struct net_device *dev)
/* Free any socket buffers */
de4x5_free_rx_buffs(dev);
de4x5_free_tx_buffs(dev);
-
+
/* Put the adapter to sleep to save power */
yawn(dev, SLEEP);
-
+
return 0;
}
@@ -1858,9 +1858,9 @@ de4x5_get_stats(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
-
+
lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
-
+
return &lp->stats;
}
@@ -1886,7 +1886,7 @@ de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
(*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
lp->pktStats.unicast++;
}
-
+
lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
if (lp->pktStats.bins[0] == 0) { /* Reset counters */
memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
@@ -1937,11 +1937,11 @@ set_multicast_list(struct net_device *dev)
omr = inl(DE4X5_OMR);
omr |= OMR_PR;
outl(omr, DE4X5_OMR);
- } else {
+ } else {
SetMulticastFilter(dev);
- load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
+ load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
SETUP_FRAME_LEN, (struct sk_buff *)1);
-
+
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
dev->trans_start = jiffies;
@@ -1969,20 +1969,20 @@ SetMulticastFilter(struct net_device *dev)
omr = inl(DE4X5_OMR);
omr &= ~(OMR_PR | OMR_PM);
pa = build_setup_frame(dev, ALL); /* Build the basic frame */
-
+
if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) {
omr |= OMR_PM; /* Pass all multicasts */
} else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
for (i=0;i<dev->mc_count;i++) { /* for each address in the list */
addrs=dmi->dmi_addr;
dmi=dmi->next;
- if ((*addrs & 0x01) == 1) { /* multicast address? */
+ if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc_le(ETH_ALEN, addrs);
hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
-
+
byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */
-
+
byte <<= 1; /* calc offset into setup frame */
if (byte & 0x02) {
byte -= 1;
@@ -1994,14 +1994,14 @@ SetMulticastFilter(struct net_device *dev)
for (j=0; j<dev->mc_count; j++) {
addrs=dmi->dmi_addr;
dmi=dmi->next;
- for (i=0; i<ETH_ALEN; i++) {
+ for (i=0; i<ETH_ALEN; i++) {
*(pa + (i&1)) = *addrs++;
if (i & 0x01) pa += 4;
}
}
}
outl(omr, DE4X5_OMR);
-
+
return;
}
@@ -2031,18 +2031,18 @@ static int __init de4x5_eisa_probe (struct device *gendev)
status = -EBUSY;
goto release_reg_1;
}
-
+
if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
status = -ENOMEM;
goto release_reg_2;
}
lp = netdev_priv(dev);
-
+
cfid = (u32) inl(PCI_CFID);
lp->cfrv = (u_short) inl(PCI_CFRV);
device = (cfid >> 8) & 0x00ffff00;
vendor = (u_short) cfid;
-
+
/* Read the EISA Configuration Registers */
regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
#ifdef CONFIG_ALPHA
@@ -2050,7 +2050,7 @@ static int __init de4x5_eisa_probe (struct device *gendev)
* care about the EISA configuration, and thus doesn't
* configure the PLX bridge properly. Oh well... Simply mimic
* the EISA config file to sort it out. */
-
+
/* EISA REG1: Assert DecChip 21040 HW Reset */
outb (ER1_IAM | 1, EISA_REG1);
mdelay (1);
@@ -2061,12 +2061,12 @@ static int __init de4x5_eisa_probe (struct device *gendev)
/* EISA REG3: R/W Burst Transfer Enable */
outb (ER3_BWE | ER3_BRE, EISA_REG3);
-
+
/* 32_bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */
outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
#endif
irq = de4x5_irq[(regval >> 1) & 0x03];
-
+
if (is_DC2114x) {
device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
}
@@ -2077,7 +2077,7 @@ static int __init de4x5_eisa_probe (struct device *gendev)
outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
outl(0x00006000, PCI_CFLT);
outl(iobase, PCI_CBIO);
-
+
DevicePresent(dev, EISA_APROM);
dev->irq = irq;
@@ -2102,7 +2102,7 @@ static int __devexit de4x5_eisa_remove (struct device *device)
dev = device->driver_data;
iobase = dev->base_addr;
-
+
unregister_netdev (dev);
free_netdev (dev);
release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
@@ -2131,11 +2131,11 @@ MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
/*
** This function searches the current bus (which is >0) for a DECchip with an
-** SROM, so that in multiport cards that have one SROM shared between multiple
+** SROM, so that in multiport cards that have one SROM shared between multiple
** DECchips, we can find the base SROM irrespective of the BIOS scan direction.
** For single port cards this is a time waster...
*/
-static void __devinit
+static void __devinit
srom_search(struct net_device *dev, struct pci_dev *pdev)
{
u_char pb;
@@ -2163,7 +2163,7 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
/* Set the device number information */
lp->device = PCI_SLOT(this_dev->devfn);
lp->bus_num = pb;
-
+
/* Set the chipset information */
if (is_DC2114x) {
device = ((cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
@@ -2176,7 +2176,7 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
/* Fetch the IRQ to be used */
irq = this_dev->irq;
if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
-
+
/* Check if I/O accesses are enabled */
pci_read_config_word(this_dev, PCI_COMMAND, &status);
if (!(status & PCI_COMMAND_IO)) continue;
@@ -2254,7 +2254,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
lp = netdev_priv(dev);
lp->bus = PCI;
lp->bus_num = 0;
-
+
/* Search for an SROM on this bus */
if (lp->bus_num != pb) {
lp->bus_num = pb;
@@ -2267,7 +2267,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
/* Set the device number information */
lp->device = dev_num;
lp->bus_num = pb;
-
+
/* Set the chipset information */
if (is_DC2114x) {
device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
@@ -2283,7 +2283,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
error = -ENODEV;
goto free_dev;
}
-
+
/* Check if I/O accesses and Bus Mastering are enabled */
pci_read_config_word(pdev, PCI_COMMAND, &status);
#ifdef __powerpc__
@@ -2322,7 +2322,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
}
dev->irq = irq;
-
+
if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
goto release;
}
@@ -2377,7 +2377,7 @@ static struct pci_driver de4x5_pci_driver = {
** Auto configure the media here rather than setting the port at compile
** time. This routine is called by de4x5_init() and when a loss of media is
** detected (excessive collisions, loss of carrier, no carrier or link fail
-** [TP] or no recent receive activity) to check whether the user has been
+** [TP] or no recent receive activity) to check whether the user has been
** sneaky and changed the port on us.
*/
static int
@@ -2405,7 +2405,7 @@ autoconf_media(struct net_device *dev)
}
enable_ast(dev, next_tick);
-
+
return (lp->media);
}
@@ -2428,7 +2428,7 @@ dc21040_autoconf(struct net_device *dev)
u_long iobase = dev->base_addr;
int next_tick = DE4X5_AUTOSENSE_MS;
s32 imr;
-
+
switch (lp->media) {
case INIT:
DISABLE_IRQs;
@@ -2447,36 +2447,36 @@ dc21040_autoconf(struct net_device *dev)
lp->local_state = 0;
next_tick = dc21040_autoconf(dev);
break;
-
+
case TP:
- next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
+ next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
TP_SUSPECT, test_tp);
break;
-
+
case TP_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
break;
-
+
case BNC:
case AUI:
case BNC_AUI:
- next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
+ next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
BNC_AUI_SUSPECT, ping_media);
break;
-
+
case BNC_AUI_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
break;
-
+
case EXT_SIA:
- next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
+ next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
NC, EXT_SIA_SUSPECT, ping_media);
break;
-
+
case EXT_SIA_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
break;
-
+
case NC:
/* default to TP for all */
reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
@@ -2488,13 +2488,13 @@ dc21040_autoconf(struct net_device *dev)
lp->tx_enable = NO;
break;
}
-
+
return next_tick;
}
static int
dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
- int next_state, int suspect_state,
+ int next_state, int suspect_state,
int (*fn)(struct net_device *, int))
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -2507,7 +2507,7 @@ dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeo
lp->local_state++;
next_tick = 500;
break;
-
+
case 1:
if (!lp->tx_enable) {
linkBad = fn(dev, timeout);
@@ -2527,7 +2527,7 @@ dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeo
}
break;
}
-
+
return next_tick;
}
@@ -2582,7 +2582,7 @@ dc21041_autoconf(struct net_device *dev)
u_long iobase = dev->base_addr;
s32 sts, irqs, irq_mask, imr, omr;
int next_tick = DE4X5_AUTOSENSE_MS;
-
+
switch (lp->media) {
case INIT:
DISABLE_IRQs;
@@ -2603,7 +2603,7 @@ dc21041_autoconf(struct net_device *dev)
lp->local_state = 0;
next_tick = dc21041_autoconf(dev);
break;
-
+
case TP_NW:
if (lp->timeout < 0) {
omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */
@@ -2623,7 +2623,7 @@ dc21041_autoconf(struct net_device *dev)
next_tick = dc21041_autoconf(dev);
}
break;
-
+
case ANS:
if (!lp->tx_enable) {
irqs = STS_LNP;
@@ -2645,11 +2645,11 @@ dc21041_autoconf(struct net_device *dev)
next_tick = 3000;
}
break;
-
+
case ANS_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
break;
-
+
case TP:
if (!lp->tx_enable) {
if (lp->timeout < 0) {
@@ -2679,11 +2679,11 @@ dc21041_autoconf(struct net_device *dev)
next_tick = 3000;
}
break;
-
+
case TP_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
break;
-
+
case AUI:
if (!lp->tx_enable) {
if (lp->timeout < 0) {
@@ -2709,11 +2709,11 @@ dc21041_autoconf(struct net_device *dev)
next_tick = 3000;
}
break;
-
+
case AUI_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
break;
-
+
case BNC:
switch (lp->local_state) {
case 0:
@@ -2731,7 +2731,7 @@ dc21041_autoconf(struct net_device *dev)
next_tick = dc21041_autoconf(dev);
}
break;
-
+
case 1:
if (!lp->tx_enable) {
if ((sts = ping_media(dev, 3000)) < 0) {
@@ -2751,11 +2751,11 @@ dc21041_autoconf(struct net_device *dev)
break;
}
break;
-
+
case BNC_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
break;
-
+
case NC:
omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */
outl(omr | OMR_FDX, DE4X5_OMR);
@@ -2768,7 +2768,7 @@ dc21041_autoconf(struct net_device *dev)
lp->tx_enable = NO;
break;
}
-
+
return next_tick;
}
@@ -2784,9 +2784,9 @@ dc21140m_autoconf(struct net_device *dev)
int ana, anlpa, cap, cr, slnk, sr;
int next_tick = DE4X5_AUTOSENSE_MS;
u_long imr, omr, iobase = dev->base_addr;
-
+
switch(lp->media) {
- case INIT:
+ case INIT:
if (lp->timeout < 0) {
DISABLE_IRQs;
lp->tx_enable = FALSE;
@@ -2813,7 +2813,7 @@ dc21140m_autoconf(struct net_device *dev)
lp->media = _100Mb;
} else if (lp->autosense == _10Mb) {
lp->media = _10Mb;
- } else if ((lp->autosense == AUTO) &&
+ } else if ((lp->autosense == AUTO) &&
((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
@@ -2831,7 +2831,7 @@ dc21140m_autoconf(struct net_device *dev)
next_tick = dc21140m_autoconf(dev);
}
break;
-
+
case ANS:
switch (lp->local_state) {
case 0:
@@ -2851,7 +2851,7 @@ dc21140m_autoconf(struct net_device *dev)
next_tick = dc21140m_autoconf(dev);
}
break;
-
+
case 1:
if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) {
next_tick = sr & ~TIMER_CB;
@@ -2862,7 +2862,7 @@ dc21140m_autoconf(struct net_device *dev)
lp->tmp = MII_SR_ASSC;
anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- if (!(anlpa & MII_ANLPA_RF) &&
+ if (!(anlpa & MII_ANLPA_RF) &&
(cap = anlpa & MII_ANLPA_TAF & ana)) {
if (cap & MII_ANA_100M) {
lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
@@ -2879,10 +2879,10 @@ dc21140m_autoconf(struct net_device *dev)
break;
}
break;
-
+
case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
if (lp->timeout < 0) {
- lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
+ lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
(~gep_rd(dev) & GEP_LNP));
SET_100Mb_PDET;
}
@@ -2899,7 +2899,7 @@ dc21140m_autoconf(struct net_device *dev)
next_tick = dc21140m_autoconf(dev);
}
break;
-
+
case _100Mb: /* Set 100Mb/s */
next_tick = 3000;
if (!lp->tx_enable) {
@@ -2933,7 +2933,7 @@ dc21140m_autoconf(struct net_device *dev)
}
}
break;
-
+
case NC:
if (lp->media != lp->c_media) {
de4x5_dbg_media(dev);
@@ -2943,7 +2943,7 @@ dc21140m_autoconf(struct net_device *dev)
lp->tx_enable = FALSE;
break;
}
-
+
return next_tick;
}
@@ -3002,7 +3002,7 @@ dc2114x_autoconf(struct net_device *dev)
lp->media = AUI;
} else {
lp->media = SPD_DET;
- if ((lp->infoblock_media == ANS) &&
+ if ((lp->infoblock_media == ANS) &&
((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
@@ -3014,7 +3014,7 @@ dc2114x_autoconf(struct net_device *dev)
next_tick = dc2114x_autoconf(dev);
}
break;
-
+
case ANS:
switch (lp->local_state) {
case 0:
@@ -3034,7 +3034,7 @@ dc2114x_autoconf(struct net_device *dev)
next_tick = dc2114x_autoconf(dev);
}
break;
-
+
case 1:
if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) {
next_tick = sr & ~TIMER_CB;
@@ -3045,7 +3045,7 @@ dc2114x_autoconf(struct net_device *dev)
lp->tmp = MII_SR_ASSC;
anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- if (!(anlpa & MII_ANLPA_RF) &&
+ if (!(anlpa & MII_ANLPA_RF) &&
(cap = anlpa & MII_ANLPA_TAF & ana)) {
if (cap & MII_ANA_100M) {
lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
@@ -3087,11 +3087,11 @@ dc2114x_autoconf(struct net_device *dev)
next_tick = 3000;
}
break;
-
+
case AUI_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
break;
-
+
case BNC:
switch (lp->local_state) {
case 0:
@@ -3109,7 +3109,7 @@ dc2114x_autoconf(struct net_device *dev)
next_tick = dc2114x_autoconf(dev);
}
break;
-
+
case 1:
if (!lp->tx_enable) {
if ((sts = ping_media(dev, 3000)) < 0) {
@@ -3130,11 +3130,11 @@ dc2114x_autoconf(struct net_device *dev)
break;
}
break;
-
+
case BNC_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
break;
-
+
case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
if (srom_map_media(dev) < 0) {
lp->tcount++;
@@ -3161,7 +3161,7 @@ dc2114x_autoconf(struct net_device *dev)
next_tick = dc2114x_autoconf(dev);
} else if (((lp->media == _100Mb) && is_100_up(dev)) ||
(((lp->media == _10Mb) || (lp->media == TP) ||
- (lp->media == BNC) || (lp->media == AUI)) &&
+ (lp->media == BNC) || (lp->media == AUI)) &&
is_10_up(dev))) {
next_tick = dc2114x_autoconf(dev);
} else {
@@ -3169,7 +3169,7 @@ dc2114x_autoconf(struct net_device *dev)
lp->media = INIT;
}
break;
-
+
case _10Mb:
next_tick = 3000;
if (!lp->tx_enable) {
@@ -3208,7 +3208,7 @@ printk("Huh?: media:%02x\n", lp->media);
lp->media = INIT;
break;
}
-
+
return next_tick;
}
@@ -3231,7 +3231,7 @@ srom_map_media(struct net_device *dev)
struct de4x5_private *lp = netdev_priv(dev);
lp->fdx = 0;
- if (lp->infoblock_media == lp->media)
+ if (lp->infoblock_media == lp->media)
return 0;
switch(lp->infoblock_media) {
@@ -3270,7 +3270,7 @@ srom_map_media(struct net_device *dev)
case SROM_100BASEFF:
if (!lp->params.fdx) return -1;
lp->fdx = TRUE;
- case SROM_100BASEF:
+ case SROM_100BASEF:
if (lp->params.fdx && !lp->fdx) return -1;
lp->media = _100Mb;
break;
@@ -3280,8 +3280,8 @@ srom_map_media(struct net_device *dev)
lp->fdx = lp->params.fdx;
break;
- default:
- printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
+ default:
+ printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
lp->infoblock_media);
return -1;
break;
@@ -3359,7 +3359,7 @@ test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14,
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
s32 sts, csr12;
-
+
if (lp->timeout < 0) {
lp->timeout = msec/100;
if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */
@@ -3372,22 +3372,22 @@ test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14,
/* clear all pending interrupts */
sts = inl(DE4X5_STS);
outl(sts, DE4X5_STS);
-
+
/* clear csr12 NRA and SRA bits */
if ((lp->chipset == DC21041) || lp->useSROM) {
csr12 = inl(DE4X5_SISR);
outl(csr12, DE4X5_SISR);
}
}
-
+
sts = inl(DE4X5_STS) & ~TIMER_CB;
-
+
if (!(sts & irqs) && --lp->timeout) {
sts = 100 | TIMER_CB;
} else {
lp->timeout = -1;
}
-
+
return sts;
}
@@ -3397,11 +3397,11 @@ test_tp(struct net_device *dev, s32 msec)
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
int sisr;
-
+
if (lp->timeout < 0) {
lp->timeout = msec/100;
}
-
+
sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
if (sisr && --lp->timeout) {
@@ -3409,7 +3409,7 @@ test_tp(struct net_device *dev, s32 msec)
} else {
lp->timeout = -1;
}
-
+
return sisr;
}
@@ -3436,7 +3436,7 @@ test_for_100Mb(struct net_device *dev, int msec)
lp->timeout = msec/SAMPLE_INTERVAL;
}
}
-
+
if (lp->phy[lp->active].id || lp->useSROM) {
gep = is_100_up(dev) | is_spd_100(dev);
} else {
@@ -3447,7 +3447,7 @@ test_for_100Mb(struct net_device *dev, int msec)
} else {
lp->timeout = -1;
}
-
+
return gep;
}
@@ -3459,13 +3459,13 @@ wait_for_link(struct net_device *dev)
if (lp->timeout < 0) {
lp->timeout = 1;
}
-
+
if (lp->timeout--) {
return TIMER_CB;
} else {
lp->timeout = -1;
}
-
+
return 0;
}
@@ -3479,21 +3479,21 @@ test_mii_reg(struct net_device *dev, int reg, int mask, int pol, long msec)
struct de4x5_private *lp = netdev_priv(dev);
int test;
u_long iobase = dev->base_addr;
-
+
if (lp->timeout < 0) {
lp->timeout = msec/100;
}
-
+
if (pol) pol = ~0;
reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
test = (reg ^ pol) & mask;
-
+
if (test && --lp->timeout) {
reg = 100 | TIMER_CB;
} else {
lp->timeout = -1;
}
-
+
return reg;
}
@@ -3503,7 +3503,7 @@ is_spd_100(struct net_device *dev)
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
int spd;
-
+
if (lp->useMII) {
spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
spd = ~(spd ^ lp->phy[lp->active].spd.value);
@@ -3517,7 +3517,7 @@ is_spd_100(struct net_device *dev)
spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
(lp->linkOK & ~lp->asBitValid);
}
-
+
return spd;
}
@@ -3526,7 +3526,7 @@ is_100_up(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
-
+
if (lp->useMII) {
/* Double read for sticky bits & temporary drops */
mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
@@ -3547,7 +3547,7 @@ is_10_up(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
-
+
if (lp->useMII) {
/* Double read for sticky bits & temporary drops */
mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
@@ -3570,7 +3570,7 @@ is_anc_capable(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
-
+
if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII));
} else if ((lp->chipset & ~0x00ff) == DC2114x) {
@@ -3590,24 +3590,24 @@ ping_media(struct net_device *dev, int msec)
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
int sisr;
-
+
if (lp->timeout < 0) {
lp->timeout = msec/100;
-
+
lp->tmp = lp->tx_new; /* Remember the ring position */
load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD);
}
-
+
sisr = inl(DE4X5_SISR);
- if ((!(sisr & SISR_NCR)) &&
- ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
+ if ((!(sisr & SISR_NCR)) &&
+ ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
(--lp->timeout)) {
sisr = 100 | TIMER_CB;
} else {
- if ((!(sisr & SISR_NCR)) &&
+ if ((!(sisr & SISR_NCR)) &&
!(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
lp->timeout) {
sisr = 0;
@@ -3616,7 +3616,7 @@ ping_media(struct net_device *dev, int msec)
}
lp->timeout = -1;
}
-
+
return sisr;
}
@@ -3668,7 +3668,7 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
} else { /* Linear buffer */
memcpy(skb_put(p,len),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,len);
}
-
+
return p;
#endif
}
@@ -3751,23 +3751,23 @@ de4x5_rst_desc_ring(struct net_device *dev)
outl(lp->dma_rings, DE4X5_RRBA);
outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
DE4X5_TRBA);
-
+
lp->rx_new = lp->rx_old = 0;
lp->tx_new = lp->tx_old = 0;
-
+
for (i = 0; i < lp->rxRingSize; i++) {
lp->rx_ring[i].status = cpu_to_le32(R_OWN);
}
-
+
for (i = 0; i < lp->txRingSize; i++) {
lp->tx_ring[i].status = cpu_to_le32(0);
}
-
+
barrier();
lp->cache.save_cnt--;
START_DE4X5;
}
-
+
return;
}
@@ -3792,7 +3792,7 @@ de4x5_cache_state(struct net_device *dev, int flag)
gep_wr(lp->cache.gepc, dev);
gep_wr(lp->cache.gep, dev);
} else {
- reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
+ reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
lp->cache.csr15);
}
break;
@@ -3854,25 +3854,25 @@ test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
s32 sts, ans;
-
+
if (lp->timeout < 0) {
lp->timeout = msec/100;
outl(irq_mask, DE4X5_IMR);
-
+
/* clear all pending interrupts */
sts = inl(DE4X5_STS);
outl(sts, DE4X5_STS);
}
-
+
ans = inl(DE4X5_SISR) & SISR_ANS;
sts = inl(DE4X5_STS) & ~TIMER_CB;
-
+
if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
sts = 100 | TIMER_CB;
} else {
lp->timeout = -1;
}
-
+
return sts;
}
@@ -3882,7 +3882,7 @@ de4x5_setup_intr(struct net_device *dev)
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
s32 imr, sts;
-
+
if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */
imr = 0;
UNMASK_IRQs;
@@ -3890,7 +3890,7 @@ de4x5_setup_intr(struct net_device *dev)
outl(sts, DE4X5_STS);
ENABLE_IRQs;
}
-
+
return;
}
@@ -3936,17 +3936,17 @@ create_packet(struct net_device *dev, char *frame, int len)
{
int i;
char *buf = frame;
-
+
for (i=0; i<ETH_ALEN; i++) { /* Use this source address */
*buf++ = dev->dev_addr[i];
}
for (i=0; i<ETH_ALEN; i++) { /* Use this destination address */
*buf++ = dev->dev_addr[i];
}
-
+
*buf++ = 0; /* Packet length (2 bytes) */
*buf++ = 1;
-
+
return;
}
@@ -3978,7 +3978,7 @@ static int
PCI_signature(char *name, struct de4x5_private *lp)
{
int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *);
-
+
if (lp->chipset == DC21040) {
strcpy(name, "DE434/5");
return status;
@@ -4007,7 +4007,7 @@ PCI_signature(char *name, struct de4x5_private *lp)
} else if ((lp->chipset & ~0x00ff) == DC2114x) {
lp->useSROM = TRUE;
}
-
+
return status;
}
@@ -4024,7 +4024,7 @@ DevicePresent(struct net_device *dev, u_long aprom_addr)
{
int i, j=0;
struct de4x5_private *lp = netdev_priv(dev);
-
+
if (lp->chipset == DC21040) {
if (lp->bus == EISA) {
enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */
@@ -4049,7 +4049,7 @@ DevicePresent(struct net_device *dev, u_long aprom_addr)
}
de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
}
-
+
return;
}
@@ -4071,11 +4071,11 @@ enet_addr_rst(u_long aprom_addr)
short sigLength=0;
s8 data;
int i, j;
-
+
dev.llsig.a = ETH_PROM_SIG;
dev.llsig.b = ETH_PROM_SIG;
sigLength = sizeof(u32) << 1;
-
+
for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
data = inb(aprom_addr);
if (dev.Sig[j] == data) { /* track signature */
@@ -4088,7 +4088,7 @@ enet_addr_rst(u_long aprom_addr)
}
}
}
-
+
return;
}
@@ -4111,7 +4111,7 @@ get_hw_addr(struct net_device *dev)
for (i=0,k=0,j=0;j<3;j++) {
k <<= 1;
if (k > 0xffff) k-=0xffff;
-
+
if (lp->bus == PCI) {
if (lp->chipset == DC21040) {
while ((tmp = inl(DE4X5_APROM)) < 0);
@@ -4133,11 +4133,11 @@ get_hw_addr(struct net_device *dev)
k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
dev->dev_addr[i++] = (u_char) tmp;
}
-
+
if (k > 0xffff) k-=0xffff;
}
if (k == 0xffff) k=0;
-
+
if (lp->bus == PCI) {
if (lp->chipset == DC21040) {
while ((tmp = inl(DE4X5_APROM)) < 0);
@@ -4156,7 +4156,7 @@ get_hw_addr(struct net_device *dev)
srom_repair(dev, broken);
#ifdef CONFIG_PPC_MULTIPLATFORM
- /*
+ /*
** If the address starts with 00 a0, we have to bit-reverse
** each byte of the address.
*/
@@ -4245,7 +4245,7 @@ test_bad_enet(struct net_device *dev, int status)
for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
if ((tmp == 0) || (tmp == 0x5fa)) {
- if ((lp->chipset == last.chipset) &&
+ if ((lp->chipset == last.chipset) &&
(lp->bus_num == last.bus) && (lp->bus_num > 0)) {
for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
for (i=ETH_ALEN-1; i>2; --i) {
@@ -4275,7 +4275,7 @@ test_bad_enet(struct net_device *dev, int status)
static int
an_exception(struct de4x5_private *lp)
{
- if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
+ if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
(*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
return -1;
}
@@ -4290,11 +4290,11 @@ static short
srom_rd(u_long addr, u_char offset)
{
sendto_srom(SROM_RD | SROM_SR, addr);
-
+
srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
-
+
return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
}
@@ -4304,7 +4304,7 @@ srom_latch(u_int command, u_long addr)
sendto_srom(command, addr);
sendto_srom(command | DT_CLK, addr);
sendto_srom(command, addr);
-
+
return;
}
@@ -4314,7 +4314,7 @@ srom_command(u_int command, u_long addr)
srom_latch(command, addr);
srom_latch(command, addr);
srom_latch((command & 0x0000ff00) | DT_CS, addr);
-
+
return;
}
@@ -4322,15 +4322,15 @@ static void
srom_address(u_int command, u_long addr, u_char offset)
{
int i, a;
-
+
a = offset << 2;
for (i=0; i<6; i++, a <<= 1) {
srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
}
udelay(1);
-
+
i = (getfrom_srom(addr) >> 3) & 0x01;
-
+
return;
}
@@ -4340,17 +4340,17 @@ srom_data(u_int command, u_long addr)
int i;
short word = 0;
s32 tmp;
-
+
for (i=0; i<16; i++) {
sendto_srom(command | DT_CLK, addr);
tmp = getfrom_srom(addr);
sendto_srom(command, addr);
-
+
word = (word << 1) | ((tmp >> 3) & 0x01);
}
-
+
sendto_srom(command & 0x0000ff00, addr);
-
+
return word;
}
@@ -4359,13 +4359,13 @@ static void
srom_busy(u_int command, u_long addr)
{
sendto_srom((command & 0x0000ff00) | DT_CS, addr);
-
+
while (!((getfrom_srom(addr) >> 3) & 0x01)) {
mdelay(1);
}
-
+
sendto_srom(command & 0x0000ff00, addr);
-
+
return;
}
*/
@@ -4375,7 +4375,7 @@ sendto_srom(u_int command, u_long addr)
{
outl(command, addr);
udelay(1);
-
+
return;
}
@@ -4383,10 +4383,10 @@ static int
getfrom_srom(u_long addr)
{
s32 tmp;
-
+
tmp = inl(addr);
udelay(1);
-
+
return tmp;
}
@@ -4403,7 +4403,7 @@ srom_infoleaf_info(struct net_device *dev)
}
if (i == INFOLEAF_SIZE) {
lp->useSROM = FALSE;
- printk("%s: Cannot find correct chipset for SROM decoding!\n",
+ printk("%s: Cannot find correct chipset for SROM decoding!\n",
dev->name);
return -ENXIO;
}
@@ -4420,7 +4420,7 @@ srom_infoleaf_info(struct net_device *dev)
}
if (i == 0) {
lp->useSROM = FALSE;
- printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
+ printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
dev->name, lp->device);
return -ENXIO;
}
@@ -4494,9 +4494,9 @@ srom_exec(struct net_device *dev, u_char *p)
if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
if (lp->chipset != DC21140) RESET_SIA;
-
+
while (count--) {
- gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
+ gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
*p++ : TWIDDLE(w++)), dev);
mdelay(2); /* 2ms per action */
}
@@ -4514,13 +4514,13 @@ srom_exec(struct net_device *dev, u_char *p)
** unless I implement the DC21041 SROM functions. There's no need
** since the existing code will be satisfactory for all boards.
*/
-static int
+static int
dc21041_infoleaf(struct net_device *dev)
{
return DE4X5_AUTOSENSE_MS;
}
-static int
+static int
dc21140_infoleaf(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4558,7 +4558,7 @@ dc21140_infoleaf(struct net_device *dev)
return next_tick & ~TIMER_CB;
}
-static int
+static int
dc21142_infoleaf(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4593,7 +4593,7 @@ dc21142_infoleaf(struct net_device *dev)
return next_tick & ~TIMER_CB;
}
-static int
+static int
dc21143_infoleaf(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4631,7 +4631,7 @@ dc21143_infoleaf(struct net_device *dev)
** The compact infoblock is only designed for DC21140[A] chips, so
** we'll reuse the dc21140m_autoconf function. Non MII media only.
*/
-static int
+static int
compact_infoblock(struct net_device *dev, u_char count, u_char *p)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4671,7 +4671,7 @@ compact_infoblock(struct net_device *dev, u_char count, u_char *p)
/*
** This block describes non MII media for the DC21140[A] only.
*/
-static int
+static int
type0_infoblock(struct net_device *dev, u_char count, u_char *p)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4711,7 +4711,7 @@ type0_infoblock(struct net_device *dev, u_char count, u_char *p)
/* These functions are under construction! */
-static int
+static int
type1_infoblock(struct net_device *dev, u_char count, u_char *p)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4750,7 +4750,7 @@ type1_infoblock(struct net_device *dev, u_char count, u_char *p)
return dc21140m_autoconf(dev);
}
-static int
+static int
type2_infoblock(struct net_device *dev, u_char count, u_char *p)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4791,7 +4791,7 @@ type2_infoblock(struct net_device *dev, u_char count, u_char *p)
return dc2114x_autoconf(dev);
}
-static int
+static int
type3_infoblock(struct net_device *dev, u_char count, u_char *p)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4833,7 +4833,7 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p)
return dc2114x_autoconf(dev);
}
-static int
+static int
type4_infoblock(struct net_device *dev, u_char count, u_char *p)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4878,7 +4878,7 @@ type4_infoblock(struct net_device *dev, u_char count, u_char *p)
** This block type provides information for resetting external devices
** (chips) through the General Purpose Register.
*/
-static int
+static int
type5_infoblock(struct net_device *dev, u_char count, u_char *p)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4916,7 +4916,7 @@ mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
mii_address(phyreg, ioaddr); /* PHY Register to read */
mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */
-
+
return mii_rdata(ioaddr); /* Read data */
}
@@ -4931,7 +4931,7 @@ mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */
data = mii_swap(data, 16); /* Swap data bit ordering */
mii_wdata(data, 16, ioaddr); /* Write data */
-
+
return;
}
@@ -4940,12 +4940,12 @@ mii_rdata(u_long ioaddr)
{
int i;
s32 tmp = 0;
-
+
for (i=0; i<16; i++) {
tmp <<= 1;
tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
}
-
+
return tmp;
}
@@ -4953,12 +4953,12 @@ static void
mii_wdata(int data, int len, u_long ioaddr)
{
int i;
-
+
for (i=0; i<len; i++) {
sendto_mii(MII_MWR | MII_WR, data, ioaddr);
data >>= 1;
}
-
+
return;
}
@@ -4966,13 +4966,13 @@ static void
mii_address(u_char addr, u_long ioaddr)
{
int i;
-
+
addr = mii_swap(addr, 5);
for (i=0; i<5; i++) {
sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
addr >>= 1;
}
-
+
return;
}
@@ -4980,12 +4980,12 @@ static void
mii_ta(u_long rw, u_long ioaddr)
{
if (rw == MII_STWR) {
- sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
- sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
+ sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
+ sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
} else {
getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
}
-
+
return;
}
@@ -4993,13 +4993,13 @@ static int
mii_swap(int data, int len)
{
int i, tmp = 0;
-
+
for (i=0; i<len; i++) {
tmp <<= 1;
tmp |= (data & 1);
data >>= 1;
}
-
+
return tmp;
}
@@ -5007,13 +5007,13 @@ static void
sendto_mii(u32 command, int data, u_long ioaddr)
{
u32 j;
-
+
j = (data & 1) << 17;
outl(command | j, ioaddr);
udelay(1);
outl(command | MII_MDC | j, ioaddr);
udelay(1);
-
+
return;
}
@@ -5024,7 +5024,7 @@ getfrom_mii(u32 command, u_long ioaddr)
udelay(1);
outl(command | MII_MDC, ioaddr);
udelay(1);
-
+
return ((inl(ioaddr) >> 19) & 1);
}
@@ -5085,7 +5085,7 @@ mii_get_phy(struct net_device *dev)
u_long iobase = dev->base_addr;
int i, j, k, n, limit=sizeof(phy_info)/sizeof(struct phy_table);
int id;
-
+
lp->active = 0;
lp->useMII = TRUE;
@@ -5094,7 +5094,7 @@ mii_get_phy(struct net_device *dev)
lp->phy[lp->active].addr = i;
if (i==0) n++; /* Count cycles */
while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */
- id = mii_get_oui(i, DE4X5_MII);
+ id = mii_get_oui(i, DE4X5_MII);
if ((id == 0) || (id == 65535)) continue; /* Valid ID? */
for (j=0; j<limit; j++) { /* Search PHY table */
if (id != phy_info[j].id) continue; /* ID match? */
@@ -5133,7 +5133,7 @@ mii_get_phy(struct net_device *dev)
for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++) { /*For each PHY*/
mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
-
+
de4x5_dbg_mii(dev, k);
}
}
@@ -5148,12 +5148,12 @@ build_setup_frame(struct net_device *dev, int mode)
struct de4x5_private *lp = netdev_priv(dev);
int i;
char *pa = lp->setup_frame;
-
+
/* Initialise the setup frame */
if (mode == ALL) {
memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
}
-
+
if (lp->setup_f == HASH_PERF) {
for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
*(pa + i) = dev->dev_addr[i]; /* Host address */
@@ -5170,7 +5170,7 @@ build_setup_frame(struct net_device *dev, int mode)
if (i & 0x01) pa += 4;
}
}
-
+
return pa; /* Points to the next entry */
}
@@ -5178,7 +5178,7 @@ static void
enable_ast(struct net_device *dev, u32 time_out)
{
timeout(dev, (void *)&de4x5_ast, (u_long)dev, time_out);
-
+
return;
}
@@ -5186,9 +5186,9 @@ static void
disable_ast(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
-
+
del_timer(&lp->timer);
-
+
return;
}
@@ -5207,10 +5207,10 @@ de4x5_switch_mac_port(struct net_device *dev)
omr |= lp->infoblock_csr6;
if (omr & OMR_PS) omr |= OMR_HBD;
outl(omr, DE4X5_OMR);
-
+
/* Soft Reset */
RESET_DE4X5;
-
+
/* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */
if (lp->chipset == DC21140) {
gep_wr(lp->cache.gepc, dev);
@@ -5263,21 +5263,21 @@ timeout(struct net_device *dev, void (*fn)(u_long data), u_long data, u_long mse
{
struct de4x5_private *lp = netdev_priv(dev);
int dt;
-
+
/* First, cancel any pending timer events */
del_timer(&lp->timer);
-
+
/* Convert msec to ticks */
dt = (msec * HZ) / 1000;
if (dt==0) dt=1;
-
+
/* Set up timer */
init_timer(&lp->timer);
lp->timer.expires = jiffies + dt;
lp->timer.function = fn;
lp->timer.data = data;
add_timer(&lp->timer);
-
+
return;
}
@@ -5375,7 +5375,7 @@ de4x5_dbg_open(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
int i;
-
+
if (de4x5_debug & DEBUG_OPEN) {
printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
printk("\tphysical address: ");
@@ -5413,11 +5413,11 @@ de4x5_dbg_open(struct net_device *dev)
}
}
printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
- printk("Ring size: \nRX: %d\nTX: %d\n",
- (short)lp->rxRingSize,
- (short)lp->txRingSize);
+ printk("Ring size: \nRX: %d\nTX: %d\n",
+ (short)lp->rxRingSize,
+ (short)lp->txRingSize);
}
-
+
return;
}
@@ -5426,7 +5426,7 @@ de4x5_dbg_mii(struct net_device *dev, int k)
{
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
-
+
if (de4x5_debug & DEBUG_MII) {
printk("\nMII device address: %d\n", lp->phy[k].addr);
printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
@@ -5445,7 +5445,7 @@ de4x5_dbg_mii(struct net_device *dev, int k)
printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
}
}
-
+
return;
}
@@ -5453,17 +5453,17 @@ static void
de4x5_dbg_media(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
-
+
if (lp->media != lp->c_media) {
if (de4x5_debug & DEBUG_MEDIA) {
printk("%s: media is %s%s\n", dev->name,
(lp->media == NC ? "unconnected, link down or incompatible connection" :
(lp->media == TP ? "TP" :
(lp->media == ANS ? "TP/Nway" :
- (lp->media == BNC ? "BNC" :
- (lp->media == AUI ? "AUI" :
- (lp->media == BNC_AUI ? "BNC/AUI" :
- (lp->media == EXT_SIA ? "EXT SIA" :
+ (lp->media == BNC ? "BNC" :
+ (lp->media == AUI ? "AUI" :
+ (lp->media == BNC_AUI ? "BNC/AUI" :
+ (lp->media == EXT_SIA ? "EXT SIA" :
(lp->media == _100Mb ? "100Mb/s" :
(lp->media == _10Mb ? "10Mb/s" :
"???"
@@ -5471,7 +5471,7 @@ de4x5_dbg_media(struct net_device *dev)
}
lp->c_media = lp->media;
}
-
+
return;
}
@@ -5554,7 +5554,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
u32 lval[36];
} tmp;
u_long flags = 0;
-
+
switch(ioc->cmd) {
case DE4X5_GET_HWADDR: /* Get the hardware address */
ioc->len = ETH_ALEN;
@@ -5575,7 +5575,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
build_setup_frame(dev, PHYS_ADDR_ONLY);
/* Set up the descriptor and give ownership to the card */
- load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
+ load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
SETUP_FRAME_LEN, (struct sk_buff *)1);
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
@@ -5617,8 +5617,8 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
spin_lock_irqsave(&lp->lock, flags);
memcpy(&statbuf, &lp->pktStats, ioc->len);
spin_unlock_irqrestore(&lp->lock, flags);
- if (copy_to_user(ioc->data, &statbuf, ioc->len))
- return -EFAULT;
+ if (copy_to_user(ioc->data, &statbuf, ioc->len))
+ return -EFAULT;
break;
}
case DE4X5_CLR_STATS: /* Zero out the driver statistics */
@@ -5652,9 +5652,9 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
ioc->len = j;
if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
break;
-
+
#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
-/*
+/*
case DE4X5_DUMP:
j = 0;
tmp.addr[j++] = dev->irq;
@@ -5664,7 +5664,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
tmp.addr[j++] = lp->rxRingSize;
tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
-
+
for (i=0;i<lp->rxRingSize-1;i++){
if (i < 3) {
tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
@@ -5677,7 +5677,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
}
tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
-
+
for (i=0;i<lp->rxRingSize-1;i++){
if (i < 3) {
tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
@@ -5690,14 +5690,14 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
}
tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
-
+
for (i=0;i<lp->rxRingSize;i++){
tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4;
}
for (i=0;i<lp->txRingSize;i++){
tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4;
}
-
+
tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4;
tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4;
@@ -5706,18 +5706,18 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
- tmp.lval[j>>2] = lp->chipset; j+=4;
+ tmp.lval[j>>2] = lp->chipset; j+=4;
if (lp->chipset == DC21140) {
tmp.lval[j>>2] = gep_rd(dev); j+=4;
} else {
tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
}
- tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
+ tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
- tmp.lval[j>>2] = lp->active; j+=4;
+ tmp.lval[j>>2] = lp->active; j+=4;
tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
@@ -5734,10 +5734,10 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
}
}
-
+
tmp.addr[j++] = lp->txRingSize;
tmp.addr[j++] = netif_queue_stopped(dev);
-
+
ioc->len = j;
if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
break;
@@ -5746,7 +5746,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
default:
return -EOPNOTSUPP;
}
-
+
return status;
}
diff --git a/drivers/net/tulip/de4x5.h b/drivers/net/tulip/de4x5.h
index ad37a4074302..57226e5eb8a6 100644
--- a/drivers/net/tulip/de4x5.h
+++ b/drivers/net/tulip/de4x5.h
@@ -38,11 +38,11 @@
/*
** EISA Register Address Map
*/
-#define EISA_ID iobase+0x0c80 /* EISA ID Registers */
-#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */
-#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */
-#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */
-#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */
+#define EISA_ID iobase+0x0c80 /* EISA ID Registers */
+#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */
+#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */
+#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */
+#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */
#define EISA_CR iobase+0x0c84 /* EISA Control Register */
#define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */
#define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */
@@ -1008,8 +1008,8 @@ struct de4x5_ioctl {
unsigned char __user *data; /* Pointer to the data buffer */
};
-/*
-** Recognised commands for the driver
+/*
+** Recognised commands for the driver
*/
#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */
#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 74e9075d9c48..ba5b112093f4 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -50,7 +50,7 @@
forget to unmap PCI mapped skbs.
Alan Cox <alan@redhat.com>
- Added new PCI identifiers provided by Clear Zhang at ALi
+ Added new PCI identifiers provided by Clear Zhang at ALi
for their 1563 ethernet device.
TODO
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index fbd9ab60b052..5ffbd5b300c0 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -96,11 +96,11 @@ static const char *block_name[] __devinitdata = {
* tulip_build_fake_mediatable - Build a fake mediatable entry.
* @tp: Ptr to the tulip private data.
*
- * Some cards like the 3x5 HSC cards (J3514A) do not have a standard
+ * Some cards like the 3x5 HSC cards (J3514A) do not have a standard
* srom and can not be handled under the fixup routine. These cards
- * still need a valid mediatable entry for correct csr12 setup and
+ * still need a valid mediatable entry for correct csr12 setup and
* mii handling.
- *
+ *
* Since this is currently a parisc-linux specific function, the
* #ifdef __hppa__ should completely optimize this function away for
* non-parisc hardware.
@@ -140,7 +140,7 @@ static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp)
tp->flags |= HAS_PHY_IRQ;
tp->csr12_shadow = -1;
}
-#endif
+#endif
}
void __devinit tulip_parse_eeprom(struct net_device *dev)
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index bb3558164a5b..da4f7593c50f 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -139,22 +139,22 @@ int tulip_poll(struct net_device *dev, int *budget)
}
/* Acknowledge current RX interrupt sources. */
iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
-
-
+
+
/* If we own the next entry, it is a new packet. Send it up. */
while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
s32 status = le32_to_cpu(tp->rx_ring[entry].status);
-
-
+
+
if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
break;
-
+
if (tulip_debug > 5)
printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
dev->name, entry, status);
if (--rx_work_limit < 0)
goto not_done;
-
+
if ((status & 0x38008300) != 0x0300) {
if ((status & 0x38000300) != 0x0300) {
/* Ingore earlier buffers. */
@@ -180,7 +180,7 @@ int tulip_poll(struct net_device *dev, int *budget)
/* Omit the four octet CRC from the length. */
short pkt_len = ((status >> 16) & 0x7ff) - 4;
struct sk_buff *skb;
-
+
#ifndef final_version
if (pkt_len > 1518) {
printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
@@ -213,7 +213,7 @@ int tulip_poll(struct net_device *dev, int *budget)
} else { /* Pass up the skb already on the Rx ring. */
char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
pkt_len);
-
+
#ifndef final_version
if (tp->rx_buffers[entry].mapping !=
le32_to_cpu(tp->rx_ring[entry].buffer1)) {
@@ -225,17 +225,17 @@ int tulip_poll(struct net_device *dev, int *budget)
skb->head, temp);
}
#endif
-
+
pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
-
+
tp->rx_buffers[entry].skb = NULL;
tp->rx_buffers[entry].mapping = 0;
}
skb->protocol = eth_type_trans(skb, dev);
-
+
netif_receive_skb(skb);
-
+
dev->last_rx = jiffies;
tp->stats.rx_packets++;
tp->stats.rx_bytes += pkt_len;
@@ -245,12 +245,12 @@ int tulip_poll(struct net_device *dev, int *budget)
entry = (++tp->cur_rx) % RX_RING_SIZE;
if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
tulip_refill_rx(dev);
-
+
}
-
+
/* New ack strategy... irq does not ack Rx any longer
hopefully this helps */
-
+
/* Really bad things can happen here... If new packet arrives
* and an irq arrives (tx or just due to occasionally unset
* mask), it will be acked by irq handler, but new thread
@@ -259,28 +259,28 @@ int tulip_poll(struct net_device *dev, int *budget)
* tomorrow (night 011029). If it will not fail, we won
* finally: amount of IO did not increase at all. */
} while ((ioread32(tp->base_addr + CSR5) & RxIntr));
-
+
done:
-
+
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
-
+
/* We use this simplistic scheme for IM. It's proven by
real life installations. We can have IM enabled
- continuesly but this would cause unnecessary latency.
- Unfortunely we can't use all the NET_RX_* feedback here.
- This would turn on IM for devices that is not contributing
- to backlog congestion with unnecessary latency.
-
+ continuesly but this would cause unnecessary latency.
+ Unfortunely we can't use all the NET_RX_* feedback here.
+ This would turn on IM for devices that is not contributing
+ to backlog congestion with unnecessary latency.
+
We monitor the the device RX-ring and have:
-
+
HW Interrupt Mitigation either ON or OFF.
-
- ON: More then 1 pkt received (per intr.) OR we are dropping
+
+ ON: More then 1 pkt received (per intr.) OR we are dropping
OFF: Only 1 pkt received
-
+
Note. We only use min and max (0, 15) settings from mit_table */
-
-
+
+
if( tp->flags & HAS_INTR_MITIGATION) {
if( received > 1 ) {
if( ! tp->mit_on ) {
@@ -297,20 +297,20 @@ done:
}
#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
-
+
dev->quota -= received;
*budget -= received;
-
+
tulip_refill_rx(dev);
-
+
/* If RX ring is not full we are out of memory. */
if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
-
+
/* Remove us from polling list and enable RX intr. */
-
+
netif_rx_complete(dev);
iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
-
+
/* The last op happens after poll completion. Which means the following:
* 1. it can race with disabling irqs in irq handler
* 2. it can race with dise/enabling irqs in other poll threads
@@ -321,9 +321,9 @@ done:
* due to races in masking and due to too late acking of already
* processed irqs. But it must not result in losing events.
*/
-
+
return 0;
-
+
not_done:
if (!received) {
@@ -331,29 +331,29 @@ done:
}
dev->quota -= received;
*budget -= received;
-
+
if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
tulip_refill_rx(dev);
-
+
if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
-
+
return 1;
-
-
+
+
oom: /* Executed with RX ints disabled */
-
-
+
+
/* Start timer, stop polling, but do not enable rx interrupts. */
mod_timer(&tp->oom_timer, jiffies+1);
-
+
/* Think: timer_pending() was an explicit signature of bug.
* Timer can be pending now but fired and completed
* before we did netif_rx_complete(). See? We would lose it. */
-
+
/* remove ourselves from the polling list */
netif_rx_complete(dev);
-
+
return 0;
}
@@ -521,9 +521,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
/* Let's see whether the interrupt really is for us */
csr5 = ioread32(ioaddr + CSR5);
- if (tp->flags & HAS_PHY_IRQ)
+ if (tp->flags & HAS_PHY_IRQ)
handled = phy_interrupt (dev);
-
+
if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
return IRQ_RETVAL(handled);
@@ -538,17 +538,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
/* Mask RX intrs and add the device to poll list. */
iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
netif_rx_schedule(dev);
-
+
if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
break;
}
-
+
/* Acknowledge the interrupt sources we handle here ASAP
the poll function does Rx and RxNoBuf acking */
-
+
iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
-#else
+#else
/* Acknowledge all of the current interrupt sources ASAP. */
iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);
@@ -559,11 +559,11 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
}
#endif /* CONFIG_TULIP_NAPI */
-
+
if (tulip_debug > 4)
printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
dev->name, csr5, ioread32(ioaddr + CSR5));
-
+
if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
unsigned int dirty_tx;
@@ -737,17 +737,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
#ifdef CONFIG_TULIP_NAPI
if (rxd)
csr5 &= ~RxPollInt;
- } while ((csr5 & (TxNoBuf |
- TxDied |
- TxIntr |
+ } while ((csr5 & (TxNoBuf |
+ TxDied |
+ TxIntr |
TimerInt |
/* Abnormal intr. */
- RxDied |
- TxFIFOUnderflow |
- TxJabber |
- TPLnkFail |
+ RxDied |
+ TxFIFOUnderflow |
+ TxJabber |
+ TPLnkFail |
SytemError )) != 0);
-#else
+#else
} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
tulip_refill_rx(dev);
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index f53396fe79c9..e9bc2a958c14 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -140,7 +140,7 @@ void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
spin_unlock_irqrestore(&tp->mii_lock, flags);
return;
}
-
+
/* Establish sync by sending 32 logic ones. */
for (i = 32; i >= 0; i--) {
iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 05d2d96f7be2..d25020da6798 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -259,7 +259,7 @@ enum t21143_csr6_bits {
There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE 32
-#define RX_RING_SIZE 128
+#define RX_RING_SIZE 128
#define MEDIA_MASK 31
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index c67c91251d04..b3cf11d32e24 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1224,7 +1224,7 @@ out:
* Chips that have the MRM/reserved bit quirk and the burst quirk. That
* is the DM910X and the on chip ULi devices
*/
-
+
static int tulip_uli_dm_quirk(struct pci_dev *pdev)
{
if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
@@ -1297,7 +1297,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
*/
/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
- aligned. Aries might need this too. The Saturn errata are not
+ aligned. Aries might need this too. The Saturn errata are not
pretty reading but thankfully it's an old 486 chipset.
2. The dreaded SiS496 486 chipset. Same workaround as Intel
@@ -1500,7 +1500,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
}
#endif
#ifdef CONFIG_MIPS_COBALT
- if ((pdev->bus->number == 0) &&
+ if ((pdev->bus->number == 0) &&
((PCI_SLOT(pdev->devfn) == 7) ||
(PCI_SLOT(pdev->devfn) == 12))) {
/* Cobalt MAC address in first EEPROM locations. */
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 238e9c72cb3a..8b3a28f53c3d 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -9,7 +9,7 @@
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
*/
#define DRV_NAME "uli526x"
@@ -185,7 +185,7 @@ struct uli526x_board_info {
/* NIC SROM data */
unsigned char srom[128];
- u8 init;
+ u8 init;
};
enum uli526x_offsets {
@@ -258,7 +258,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
struct uli526x_board_info *db; /* board information structure */
struct net_device *dev;
int i, err;
-
+
ULI526X_DBUG(0, "uli526x_init_one()", 0);
if (!printed_version++)
@@ -316,7 +316,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
err = -ENOMEM;
goto err_out_nomem;
}
-
+
db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
db->first_tx_desc_dma = db->desc_pool_dma_ptr;
db->buf_pool_start = db->buf_pool_ptr;
@@ -324,14 +324,14 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
db->chip_id = ent->driver_data;
db->ioaddr = pci_resource_start(pdev, 0);
-
+
db->pdev = pdev;
db->init = 1;
-
+
dev->base_addr = db->ioaddr;
dev->irq = pdev->irq;
pci_set_drvdata(pdev, dev);
-
+
/* Register some necessary functions */
dev->open = &uli526x_open;
dev->hard_start_xmit = &uli526x_start_xmit;
@@ -341,7 +341,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
dev->ethtool_ops = &netdev_ethtool_ops;
spin_lock_init(&db->lock);
-
+
/* read 64 word srom data */
for (i = 0; i < 64; i++)
((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
@@ -374,7 +374,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
goto err_out_res;
printk(KERN_INFO "%s: ULi M%04lx at pci%s,",dev->name,ent->driver_data >> 16,pci_name(pdev));
-
+
for (i = 0; i < 6; i++)
printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
printk(", irq %d.\n", dev->irq);
@@ -389,7 +389,7 @@ err_out_nomem:
if(db->desc_pool_ptr)
pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
db->desc_pool_ptr, db->desc_pool_dma_ptr);
-
+
if(db->buf_pool_ptr != NULL)
pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
db->buf_pool_ptr, db->buf_pool_dma_ptr);
@@ -433,7 +433,7 @@ static int uli526x_open(struct net_device *dev)
{
int ret;
struct uli526x_board_info *db = netdev_priv(dev);
-
+
ULI526X_DBUG(0, "uli526x_open", 0);
ret = request_irq(dev->irq, &uli526x_interrupt, SA_SHIRQ, dev->name, dev);
@@ -454,7 +454,7 @@ static int uli526x_open(struct net_device *dev)
/* CR6 operation mode decision */
db->cr6_data |= ULI526X_TXTH_256;
db->cr0_data = CR0_DEFAULT;
-
+
/* Initialize ULI526X board */
uli526x_init(dev);
@@ -604,7 +604,7 @@ static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Restore CR7 to enable interrupt */
spin_unlock_irqrestore(&db->lock, flags);
outl(db->cr7_data, dev->base_addr + DCR7);
-
+
/* free this SKB */
dev_kfree_skb(skb);
@@ -782,7 +782,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
struct sk_buff *skb;
int rxlen;
u32 rdes0;
-
+
rxptr = db->rx_ready_ptr;
while(db->rx_avail_cnt) {
@@ -821,7 +821,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
if ( !(rdes0 & 0x8000) ||
((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
skb = rxptr->rx_skb_ptr;
-
+
/* Good packet, send to upper layer */
/* Shorst packet used new SKB */
if ( (rxlen < RX_COPY_SIZE) &&
@@ -841,7 +841,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
dev->last_rx = jiffies;
db->stats.rx_packets++;
db->stats.rx_bytes += rxlen;
-
+
} else {
/* Reuse SKB buffer when the packet is error */
ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
@@ -911,7 +911,7 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_MII);
-
+
ecmd->advertising = (ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
@@ -924,13 +924,13 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
ecmd->phy_address = db->phy_addr;
ecmd->transceiver = XCVR_EXTERNAL;
-
+
ecmd->speed = 10;
ecmd->duplex = DUPLEX_HALF;
-
+
if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
{
- ecmd->speed = 100;
+ ecmd->speed = 100;
}
if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
{
@@ -939,11 +939,11 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
if(db->link_failed)
{
ecmd->speed = -1;
- ecmd->duplex = -1;
+ ecmd->duplex = -1;
}
-
+
if (db->media_mode & ULI526X_AUTO)
- {
+ {
ecmd->autoneg = AUTONEG_ENABLE;
}
}
@@ -964,15 +964,15 @@ static void netdev_get_drvinfo(struct net_device *dev,
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
struct uli526x_board_info *np = netdev_priv(dev);
-
+
ULi_ethtool_gset(np, cmd);
-
+
return 0;
}
static u32 netdev_get_link(struct net_device *dev) {
struct uli526x_board_info *np = netdev_priv(dev);
-
+
if(np->link_failed)
return 0;
else
@@ -1005,11 +1005,11 @@ static void uli526x_timer(unsigned long data)
struct uli526x_board_info *db = netdev_priv(dev);
unsigned long flags;
u8 TmpSpeed=10;
-
+
//ULI526X_DBUG(0, "uli526x_timer()", 0);
spin_lock_irqsave(&db->lock, flags);
-
+
/* Dynamic reset ULI526X : system error or transmit time-out */
tmp_cr8 = inl(db->ioaddr + DCR8);
if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
@@ -1021,9 +1021,9 @@ static void uli526x_timer(unsigned long data)
/* TX polling kick monitor */
if ( db->tx_packet_cnt &&
time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) {
- outl(0x1, dev->base_addr + DCR1); // Tx polling again
+ outl(0x1, dev->base_addr + DCR1); // Tx polling again
- // TX Timeout
+ // TX Timeout
if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) {
db->reset_TXtimeout++;
db->wait_reset = 1;
@@ -1073,7 +1073,7 @@ static void uli526x_timer(unsigned long data)
uli526x_sense_speed(db) )
db->link_failed = 1;
uli526x_process_mode(db);
-
+
if(db->link_failed==0)
{
if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
@@ -1404,7 +1404,7 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
if ( (phy_mode & 0x24) == 0x24 ) {
-
+
phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7);
if(phy_mode&0x8000)
phy_mode = 0x8000;
@@ -1414,7 +1414,7 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
phy_mode = 0x2000;
else
phy_mode = 0x1000;
-
+
/* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
switch (phy_mode) {
case 0x1000: db->op_mode = ULI526X_10MHF; break;
@@ -1442,7 +1442,7 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
static void uli526x_set_phyxcer(struct uli526x_board_info *db)
{
u16 phy_reg;
-
+
/* Phyxcer capability setting */
phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
@@ -1457,7 +1457,7 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *db)
case ULI526X_100MHF: phy_reg |= 0x80; break;
case ULI526X_100MFD: phy_reg |= 0x100; break;
}
-
+
}
/* Write new capability to Phyxcer Reg4 */
@@ -1556,7 +1556,7 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data
/* Write a word data to PHY controller */
for ( i = 0x8000; i > 0; i >>= 1)
phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
-
+
}
@@ -1574,7 +1574,7 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
return phy_readby_cr10(iobase, phy_addr, offset);
/* M5261/M5263 Chip */
ioaddr = iobase + DCR9;
-
+
/* Send 33 synchronization clock to Phy controller */
for (i = 0; i < 35; i++)
phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
@@ -1610,7 +1610,7 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
{
unsigned long ioaddr,cr10_value;
-
+
ioaddr = iobase + DCR10;
cr10_value = phy_addr;
cr10_value = (cr10_value<<5) + offset;
@@ -1629,7 +1629,7 @@ static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
{
unsigned long ioaddr,cr10_value;
-
+
ioaddr = iobase + DCR10;
cr10_value = phy_addr;
cr10_value = (cr10_value<<5) + offset;
@@ -1659,7 +1659,7 @@ static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
{
u16 phy_data;
-
+
outl(0x50000 , ioaddr);
udelay(1);
phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 136a70c4d5e4..64ecf929d2ac 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -38,12 +38,12 @@
Copyright (C) 2001 Manfred Spraul
* ethtool support (jgarzik)
* Replace some MII-related magic numbers with constants (jgarzik)
-
+
TODO:
* enable pci_power_off
* Wake-On-LAN
*/
-
+
#define DRV_NAME "winbond-840"
#define DRV_VERSION "1.01-d"
#define DRV_RELDATE "Nov-17-2001"
@@ -57,7 +57,7 @@ c-help-name: Winbond W89c840 PCI Ethernet support
c-help-symbol: CONFIG_WINBOND_840
c-help: This driver is for the Winbond W89c840 chip. It also works with
c-help: the TX9882 chip on the Compex RL100-ATX board.
-c-help: More specific information and updates are available from
+c-help: More specific information and updates are available from
c-help: http://www.scyld.com/network/drivers.html
*/
@@ -207,7 +207,7 @@ Test with 'ping -s 10000' on a fast computer.
*/
-
+
/*
PCI probe table.
@@ -374,7 +374,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);
-
+
static int __devinit w840_probe1 (struct pci_dev *pdev,
const struct pci_device_id *ent)
@@ -434,7 +434,7 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
np->mii_if.mdio_read = mdio_read;
np->mii_if.mdio_write = mdio_write;
np->base_addr = ioaddr;
-
+
pci_set_drvdata(pdev, dev);
if (dev->mem_start)
@@ -510,7 +510,7 @@ err_out_netdev:
return -ENODEV;
}
-
+
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
often serial bit streams generated by the host processor.
The example below is for the common 93c46 EEPROM, 64 16 bit words. */
@@ -660,7 +660,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
return;
}
-
+
static int netdev_open(struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
@@ -731,7 +731,7 @@ static int update_link(struct net_device *dev)
dev->name, np->phys[0]);
netif_carrier_on(dev);
}
-
+
if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
/* If the link partner doesn't support autonegotiation
* the MII detects it's abilities with the "parallel detection".
@@ -761,7 +761,7 @@ static int update_link(struct net_device *dev)
result |= 0x20000000;
if (result != np->csr6 && debug)
printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
- dev->name, fasteth ? 100 : 10,
+ dev->name, fasteth ? 100 : 10,
duplex ? "full" : "half", np->phys[0]);
return result;
}
@@ -947,7 +947,7 @@ static void init_registers(struct net_device *dev)
iowrite32(i, ioaddr + PCIBusCfg);
np->csr6 = 0;
- /* 128 byte Tx threshold;
+ /* 128 byte Tx threshold;
Transmit on; Receive on; */
update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
@@ -1584,7 +1584,7 @@ static int netdev_close(struct net_device *dev)
static void __devexit w840_remove1 (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
-
+
if (dev) {
struct netdev_private *np = netdev_priv(dev);
unregister_netdev(dev);
@@ -1640,7 +1640,7 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
spin_unlock_wait(&dev->xmit_lock);
synchronize_irq(dev->irq);
-
+
np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
/* no more hardware accesses behind this line. */
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 56344103ac23..63c2175ed138 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -1,11 +1,11 @@
/*
- * xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards
+ * xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards
*
* This software is (C) by the respective authors, and licensed under the GPL
* License.
*
* Written by Arjan van de Ven for Red Hat, Inc.
- * Based on work by Jeff Garzik, Doug Ledford and Donald Becker
+ * Based on work by Jeff Garzik, Doug Ledford and Donald Becker
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
@@ -93,7 +93,7 @@ struct xircom_private {
unsigned long io_port;
int open;
-
+
/* transmit_used is the rotating counter that indicates which transmit
descriptor has to be used next */
int transmit_used;
@@ -153,10 +153,10 @@ static struct pci_device_id xircom_pci_table[] = {
MODULE_DEVICE_TABLE(pci, xircom_pci_table);
static struct pci_driver xircom_ops = {
- .name = "xircom_cb",
- .id_table = xircom_pci_table,
- .probe = xircom_probe,
- .remove = xircom_remove,
+ .name = "xircom_cb",
+ .id_table = xircom_pci_table,
+ .probe = xircom_probe,
+ .remove = xircom_remove,
.suspend =NULL,
.resume =NULL
};
@@ -174,7 +174,7 @@ static void print_binary(unsigned int number)
buffer[i2++]='1';
else
buffer[i2++]='0';
- if ((i&3)==0)
+ if ((i&3)==0)
buffer[i2++]=' ';
}
printk("%s\n",buffer);
@@ -196,10 +196,10 @@ static struct ethtool_ops netdev_ethtool_ops = {
/* xircom_probe is the code that gets called on device insertion.
it sets up the hardware and registers the device to the networklayer.
-
+
TODO: Send 1 or 2 "dummy" packets here as the card seems to discard the
first two packets that get send, and pump hates that.
-
+
*/
static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -209,7 +209,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
unsigned long flags;
unsigned short tmp16;
enter("xircom_probe");
-
+
/* First do the PCI initialisation */
if (pci_enable_device(pdev))
@@ -217,24 +217,24 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
/* disable all powermanagement */
pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000);
-
+
pci_set_master(pdev); /* Why isn't this done by pci_enable_device ?*/
- /* clear PCI status, if any */
- pci_read_config_word (pdev,PCI_STATUS, &tmp16);
+ /* clear PCI status, if any */
+ pci_read_config_word (pdev,PCI_STATUS, &tmp16);
pci_write_config_word (pdev, PCI_STATUS,tmp16);
-
+
pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
-
+
if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
printk(KERN_ERR "xircom_probe: failed to allocate io-region\n");
return -ENODEV;
}
- /*
+ /*
Before changing the hardware, allocate the memory.
This way, we can fail gracefully if not enough memory
- is available.
+ is available.
*/
dev = alloc_etherdev(sizeof(struct xircom_private));
if (!dev) {
@@ -242,13 +242,13 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
goto device_fail;
}
private = netdev_priv(dev);
-
+
/* Allocate the send/receive buffers */
private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
if (private->rx_buffer == NULL) {
printk(KERN_ERR "xircom_probe: no memory for rx buffer \n");
goto rx_buf_fail;
- }
+ }
private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
if (private->tx_buffer == NULL) {
printk(KERN_ERR "xircom_probe: no memory for tx buffer \n");
@@ -265,11 +265,11 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
spin_lock_init(&private->lock);
dev->irq = pdev->irq;
dev->base_addr = private->io_port;
-
+
initialize_card(private);
read_mac_address(private);
setup_descriptors(private);
-
+
dev->open = &xircom_open;
dev->hard_start_xmit = &xircom_start_xmit;
dev->stop = &xircom_close;
@@ -285,19 +285,19 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
printk(KERN_ERR "xircom_probe: netdevice registration failed.\n");
goto reg_fail;
}
-
+
printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, chip_rev, pdev->irq);
/* start the transmitter to get a heartbeat */
/* TODO: send 2 dummy packets here */
transceiver_voodoo(private);
-
+
spin_lock_irqsave(&private->lock,flags);
activate_transmitter(private);
activate_receiver(private);
spin_unlock_irqrestore(&private->lock,flags);
-
+
trigger_receive(private);
-
+
leave("xircom_probe");
return 0;
@@ -332,7 +332,7 @@ static void __devexit xircom_remove(struct pci_dev *pdev)
free_netdev(dev);
pci_set_drvdata(pdev, NULL);
leave("xircom_remove");
-}
+}
static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
@@ -346,11 +346,11 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs
spin_lock(&card->lock);
status = inl(card->io_port+CSR5);
-#ifdef DEBUG
+#ifdef DEBUG
print_binary(status);
printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]);
printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]);
-#endif
+#endif
/* Handle shared irq and hotplug */
if (status == 0 || status == 0xffffffff) {
spin_unlock(&card->lock);
@@ -366,21 +366,21 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs
netif_carrier_on(dev);
else
netif_carrier_off(dev);
-
+
}
- /* Clear all remaining interrupts */
+ /* Clear all remaining interrupts */
status |= 0xffffffff; /* FIXME: make this clear only the
real existing bits */
outl(status,card->io_port+CSR5);
-
- for (i=0;i<NUMDESCRIPTORS;i++)
+
+ for (i=0;i<NUMDESCRIPTORS;i++)
investigate_write_descriptor(dev,card,i,bufferoffsets[i]);
- for (i=0;i<NUMDESCRIPTORS;i++)
+ for (i=0;i<NUMDESCRIPTORS;i++)
investigate_read_descriptor(dev,card,i,bufferoffsets[i]);
-
+
spin_unlock(&card->lock);
leave("xircom_interrupt");
return IRQ_HANDLED;
@@ -393,38 +393,38 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
int nextdescriptor;
int desc;
enter("xircom_start_xmit");
-
+
card = netdev_priv(dev);
spin_lock_irqsave(&card->lock,flags);
-
+
/* First see if we can free some descriptors */
- for (desc=0;desc<NUMDESCRIPTORS;desc++)
+ for (desc=0;desc<NUMDESCRIPTORS;desc++)
investigate_write_descriptor(dev,card,desc,bufferoffsets[desc]);
-
-
+
+
nextdescriptor = (card->transmit_used +1) % (NUMDESCRIPTORS);
desc = card->transmit_used;
-
+
/* only send the packet if the descriptor is free */
if (card->tx_buffer[4*desc]==0) {
/* Copy the packet data; zero the memory first as the card
sometimes sends more than you ask it to. */
-
+
memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536);
memcpy(&(card->tx_buffer[bufferoffsets[desc]/4]),skb->data,skb->len);
-
-
+
+
/* FIXME: The specification tells us that the length we send HAS to be a multiple of
4 bytes. */
-
+
card->tx_buffer[4*desc+1] = skb->len;
if (desc == NUMDESCRIPTORS-1)
card->tx_buffer[4*desc+1] |= (1<<25); /* bit 25: last descriptor of the ring */
card->tx_buffer[4*desc+1] |= 0xF0000000;
- /* 0xF0... means want interrupts*/
+ /* 0xF0... means want interrupts*/
card->tx_skb[desc] = skb;
-
+
wmb();
/* This gives the descriptor to the card */
card->tx_buffer[4*desc] = 0x80000000;
@@ -433,18 +433,18 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
card->transmit_used = nextdescriptor;
- leave("xircom-start_xmit - sent");
+ leave("xircom-start_xmit - sent");
spin_unlock_irqrestore(&card->lock,flags);
return 0;
}
-
+
/* Uh oh... no free descriptor... drop the packet */
netif_stop_queue(dev);
spin_unlock_irqrestore(&card->lock,flags);
trigger_transmit(card);
-
+
return -EIO;
}
@@ -462,7 +462,7 @@ static int xircom_open(struct net_device *dev)
leave("xircom_open - No IRQ");
return retval;
}
-
+
xircom_up(xp);
xp->open = 1;
leave("xircom_open");
@@ -473,31 +473,31 @@ static int xircom_close(struct net_device *dev)
{
struct xircom_private *card;
unsigned long flags;
-
+
enter("xircom_close");
card = netdev_priv(dev);
netif_stop_queue(dev); /* we don't want new packets */
-
+
spin_lock_irqsave(&card->lock,flags);
-
+
disable_all_interrupts(card);
-#if 0
+#if 0
/* We can enable this again once we send dummy packets on ifconfig ethX up */
deactivate_receiver(card);
deactivate_transmitter(card);
-#endif
+#endif
remove_descriptors(card);
-
+
spin_unlock_irqrestore(&card->lock,flags);
-
+
card->open = 0;
free_irq(dev->irq,dev);
-
+
leave("xircom_close");
-
+
return 0;
-
+
}
@@ -506,8 +506,8 @@ static struct net_device_stats *xircom_get_stats(struct net_device *dev)
{
struct xircom_private *card = netdev_priv(dev);
return &card->stats;
-}
-
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xircom_poll_controller(struct net_device *dev)
@@ -540,7 +540,7 @@ static void initialize_card(struct xircom_private *card)
outl(val, card->io_port + CSR0);
- val = 0; /* Value 0x00 is a safe and conservative value
+ val = 0; /* Value 0x00 is a safe and conservative value
for the PCI configuration settings */
outl(val, card->io_port + CSR0);
@@ -617,23 +617,23 @@ static void setup_descriptors(struct xircom_private *card)
/* Rx Descr2: address of the buffer
we store the buffer at the 2nd half of the page */
-
+
address = (unsigned long) card->rx_dma_handle;
card->rx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]);
/* Rx Desc3: address of 2nd buffer -> 0 */
card->rx_buffer[i*4 + 3] = 0;
}
-
+
wmb();
/* Write the receive descriptor ring address to the card */
address = (unsigned long) card->rx_dma_handle;
- val = cpu_to_le32(address);
+ val = cpu_to_le32(address);
outl(val, card->io_port + CSR3); /* Receive descr list address */
/* transmit descriptors */
memset(card->tx_buffer, 0, 128); /* clear the descriptors */
-
+
for (i=0;i<NUMDESCRIPTORS;i++ ) {
/* Tx Descr0: Empty, we own it, no errors -> 0x00000000 */
card->tx_buffer[i*4 + 0] = 0x00000000;
@@ -641,7 +641,7 @@ static void setup_descriptors(struct xircom_private *card)
card->tx_buffer[i*4 + 1] = 1536;
if (i==NUMDESCRIPTORS-1)
card->tx_buffer[i*4 + 1] |= (1 << 25); /* bit 25 is "last descriptor" */
-
+
/* Tx Descr2: address of the buffer
we store the buffer at the 2nd half of the page */
address = (unsigned long) card->tx_dma_handle;
@@ -748,7 +748,7 @@ static int receive_active(struct xircom_private *card)
activate_receiver enables the receiver on the card.
Before being allowed to active the receiver, the receiver
must be completely de-activated. To achieve this,
-this code actually disables the receiver first; then it waits for the
+this code actually disables the receiver first; then it waits for the
receiver to become inactive, then it activates the receiver and then
it waits for the receiver to be active.
@@ -762,13 +762,13 @@ static void activate_receiver(struct xircom_private *card)
val = inl(card->io_port + CSR6); /* Operation mode */
-
+
/* If the "active" bit is set and the receiver is already
active, no need to do the expensive thing */
if ((val&2) && (receive_active(card)))
return;
-
-
+
+
val = val & ~2; /* disable the receiver */
outl(val, card->io_port + CSR6);
@@ -805,7 +805,7 @@ static void activate_receiver(struct xircom_private *card)
/*
deactivate_receiver disables the receiver on the card.
-To achieve this this code disables the receiver first;
+To achieve this this code disables the receiver first;
then it waits for the receiver to become inactive.
must be called with the lock held and interrupts disabled.
@@ -840,7 +840,7 @@ static void deactivate_receiver(struct xircom_private *card)
activate_transmitter enables the transmitter on the card.
Before being allowed to active the transmitter, the transmitter
must be completely de-activated. To achieve this,
-this code actually disables the transmitter first; then it waits for the
+this code actually disables the transmitter first; then it waits for the
transmitter to become inactive, then it activates the transmitter and then
it waits for the transmitter to be active again.
@@ -856,7 +856,7 @@ static void activate_transmitter(struct xircom_private *card)
val = inl(card->io_port + CSR6); /* Operation mode */
/* If the "active" bit is set and the receiver is already
- active, no need to do the expensive thing */
+ active, no need to do the expensive thing */
if ((val&(1<<13)) && (transmit_active(card)))
return;
@@ -896,7 +896,7 @@ static void activate_transmitter(struct xircom_private *card)
/*
deactivate_transmitter disables the transmitter on the card.
-To achieve this this code disables the transmitter first;
+To achieve this this code disables the transmitter first;
then it waits for the transmitter to become inactive.
must be called with the lock held and interrupts disabled.
@@ -990,7 +990,7 @@ static void disable_all_interrupts(struct xircom_private *card)
{
unsigned int val;
enter("enable_all_interrupts");
-
+
val = 0; /* disable all interrupts */
outl(val, card->io_port + CSR7);
@@ -1031,8 +1031,8 @@ static int enable_promisc(struct xircom_private *card)
unsigned int val;
enter("enable_promisc");
- val = inl(card->io_port + CSR6);
- val = val | (1 << 6);
+ val = inl(card->io_port + CSR6);
+ val = val | (1 << 6);
outl(val, card->io_port + CSR6);
leave("enable_promisc");
@@ -1042,7 +1042,7 @@ static int enable_promisc(struct xircom_private *card)
-/*
+/*
link_status() checks the the links status and will return 0 for no link, 10 for 10mbit link and 100 for.. guess what.
Must be called in locked state with interrupts disabled
@@ -1051,15 +1051,15 @@ static int link_status(struct xircom_private *card)
{
unsigned int val;
enter("link_status");
-
+
val = inb(card->io_port + CSR12);
-
+
if (!(val&(1<<2))) /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */
return 10;
if (!(val&(1<<1))) /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */
return 100;
-
- /* If we get here -> no link at all */
+
+ /* If we get here -> no link at all */
leave("link_status");
return 0;
@@ -1071,7 +1071,7 @@ static int link_status(struct xircom_private *card)
/*
read_mac_address() reads the MAC address from the NIC and stores it in the "dev" structure.
-
+
This function will take the spinlock itself and can, as a result, not be called with the lock helt.
*/
static void read_mac_address(struct xircom_private *card)
@@ -1081,7 +1081,7 @@ static void read_mac_address(struct xircom_private *card)
int i;
enter("read_mac_address");
-
+
spin_lock_irqsave(&card->lock, flags);
outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */
@@ -1095,7 +1095,7 @@ static void read_mac_address(struct xircom_private *card)
outl(i + 3, card->io_port + CSR10);
data_count = inl(card->io_port + CSR9) & 0xff;
if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
- /*
+ /*
* This is it. We have the data we want.
*/
for (j = 0; j < 6; j++) {
@@ -1136,12 +1136,12 @@ static void transceiver_voodoo(struct xircom_private *card)
spin_lock_irqsave(&card->lock, flags);
outl(0x0008, card->io_port + CSR15);
- udelay(25);
+ udelay(25);
outl(0xa8050000, card->io_port + CSR15);
udelay(25);
outl(0xa00f0000, card->io_port + CSR15);
udelay(25);
-
+
spin_unlock_irqrestore(&card->lock, flags);
netif_start_queue(card->dev);
@@ -1163,15 +1163,15 @@ static void xircom_up(struct xircom_private *card)
spin_lock_irqsave(&card->lock, flags);
-
+
enable_link_interrupt(card);
enable_transmit_interrupt(card);
enable_receive_interrupt(card);
enable_common_interrupts(card);
enable_promisc(card);
-
+
/* The card can have received packets already, read them away now */
- for (i=0;i<NUMDESCRIPTORS;i++)
+ for (i=0;i<NUMDESCRIPTORS;i++)
investigate_read_descriptor(card->dev,card,i,bufferoffsets[i]);
@@ -1185,15 +1185,15 @@ static void xircom_up(struct xircom_private *card)
/* Bufferoffset is in BYTES */
static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset)
{
- int status;
-
+ int status;
+
enter("investigate_read_descriptor");
status = card->rx_buffer[4*descnr];
-
+
if ((status > 0)) { /* packet received */
-
+
/* TODO: discard error packets */
-
+
short pkt_len = ((status >> 16) & 0x7ff) - 4; /* minus 4, we don't want the CRC */
struct sk_buff *skb;
@@ -1216,7 +1216,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
dev->last_rx = jiffies;
card->stats.rx_packets++;
card->stats.rx_bytes += pkt_len;
-
+
out:
/* give the buffer back to the card */
card->rx_buffer[4*descnr] = 0x80000000;
@@ -1234,9 +1234,9 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
int status;
enter("investigate_write_descriptor");
-
+
status = card->tx_buffer[4*descnr];
-#if 0
+#if 0
if (status & 0x8000) { /* Major error */
printk(KERN_ERR "Major transmit error status %x \n", status);
card->tx_buffer[4*descnr] = 0;
@@ -1258,7 +1258,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
}
leave("investigate_write_descriptor");
-
+
}
@@ -1271,8 +1271,8 @@ static int __init xircom_init(void)
static void __exit xircom_exit(void)
{
pci_unregister_driver(&xircom_ops);
-}
+}
-module_init(xircom_init)
+module_init(xircom_init)
module_exit(xircom_exit)
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index d9a774b91ddc..f1b2640ebdc6 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -307,7 +307,7 @@ enum velocity_owner {
#define TX_QUEUE_NO 4
#define MAX_HW_MIB_COUNTER 32
-#define VELOCITY_MIN_MTU (1514-14)
+#define VELOCITY_MIN_MTU (64)
#define VELOCITY_MAX_MTU (9000)
/*
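The MTU floor above drops from the old frame-size-derived value to a plain 64 bytes. A change_mtu handler normally just range-checks a requested value against these two constants; the following is a hypothetical userspace sketch of that check, not the driver's actual velocity_change_mtu():

#include <stdio.h>

#define VELOCITY_MIN_MTU (64)
#define VELOCITY_MAX_MTU (9000)

/* Hypothetical range check mirroring what a change_mtu handler does. */
static int velocity_mtu_ok(int new_mtu)
{
	return new_mtu >= VELOCITY_MIN_MTU && new_mtu <= VELOCITY_MAX_MTU;
}

int main(void)
{
	printf("1500: %s\n", velocity_mtu_ok(1500) ? "ok" : "rejected");
	printf("  60: %s\n", velocity_mtu_ok(60)   ? "ok" : "rejected");
	return 0;
}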
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index eba8e5cfacc2..f485a97844cc 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -50,10 +50,6 @@ static const char* devname = "PCI200SYN";
static int pci_clock_freq = 33000000;
#define CLOCK_BASE pci_clock_freq
-#define PCI_VENDOR_ID_GORAMO 0x10B5 /* uses PLX:9050 ID - this card */
-#define PCI_DEVICE_ID_PCI200SYN 0x9050 /* doesn't have its own ID */
-
-
/*
* PLX PCI9052 local configuration and shared runtime registers.
* This structure can be used to access 9052 registers (memory mapped).
@@ -262,7 +258,7 @@ static void pci200_pci_remove_one(struct pci_dev *pdev)
int i;
card_t *card = pci_get_drvdata(pdev);
- for(i = 0; i < 2; i++)
+ for (i = 0; i < 2; i++)
if (card->ports[i].card) {
struct net_device *dev = port_to_dev(&card->ports[i]);
unregister_hdlc_device(dev);
@@ -385,6 +381,15 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
" %u RX packets rings\n", ramsize / 1024, ramphys,
pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers);
+ if (pdev->subsystem_device == PCI_DEVICE_ID_PLX_9050) {
+ printk(KERN_ERR "Detected PCI200SYN card with old "
+ "configuration data.\n");
+ printk(KERN_ERR "See <http://www.kernel.org/pub/"
+ "linux/utils/net/hdlc/pci200syn/> for update.\n");
+ printk(KERN_ERR "The card will stop working with"
+ " future versions of Linux if not updated.\n");
+ }
+
if (card->tx_ring_buffers < 1) {
printk(KERN_ERR "pci200syn: RAM test failed\n");
pci200_pci_remove_one(pdev);
@@ -396,7 +401,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
writew(readw(p) | 0x0040, p);
/* Allocate IRQ */
- if(request_irq(pdev->irq, sca_intr, SA_SHIRQ, devname, card)) {
+ if (request_irq(pdev->irq, sca_intr, SA_SHIRQ, devname, card)) {
printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n",
pdev->irq);
pci200_pci_remove_one(pdev);
@@ -406,7 +411,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
sca_init(card, 0);
- for(i = 0; i < 2; i++) {
+ for (i = 0; i < 2; i++) {
port_t *port = &card->ports[i];
struct net_device *dev = port_to_dev(port);
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -425,7 +430,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
hdlc->xmit = sca_xmit;
port->settings.clock_type = CLOCK_EXT;
port->card = card;
- if(register_hdlc_device(dev)) {
+ if (register_hdlc_device(dev)) {
printk(KERN_ERR "pci200syn: unable to register hdlc "
"device\n");
port->card = NULL;
@@ -445,8 +450,10 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
static struct pci_device_id pci200_pci_tbl[] __devinitdata = {
- { PCI_VENDOR_ID_GORAMO, PCI_DEVICE_ID_PCI200SYN, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
+ PCI_DEVICE_ID_PLX_9050, 0, 0, 0 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
+ PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
{ 0, }
};
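The new match table keys on the PLX 9050 vendor/device pair plus the subsystem IDs, because the card no longer claims the bare PLX ID as its own. A minimal userspace model of how such a table entry is matched follows; the numeric IDs are illustrative stand-ins for the PCI_DEVICE_ID_PLX_* constants, and the second subsystem ID is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define ANY_ID 0xffffu

struct pci_id {
	uint16_t vendor, device, subvendor, subdevice;
};

/* Illustrative values; the real constants live in pci_ids.h. */
static const struct pci_id tbl[] = {
	{ 0x10b5, 0x9050, 0x10b5, 0x9050 },	/* old EEPROM data, still claimed */
	{ 0x10b5, 0x9050, 0x10b5, 0x3196 },	/* updated subsystem ID (hypothetical value) */
	{ 0 }
};

static int id_match(const struct pci_id *id,
		    uint16_t v, uint16_t d, uint16_t sv, uint16_t sd)
{
	return id->vendor == v && id->device == d &&
	       (id->subvendor == ANY_ID || id->subvendor == sv) &&
	       (id->subdevice == ANY_ID || id->subdevice == sd);
}

int main(void)
{
	/* A generic PLX 9050 board with unrelated subsystem IDs no longer matches... */
	printf("%d\n", id_match(&tbl[0], 0x10b5, 0x9050, 0x1234, 0x5678));
	/* ...but a card whose EEPROM still carries the default IDs does. */
	printf("%d\n", id_match(&tbl[0], 0x10b5, 0x9050, 0x10b5, 0x9050));
	return 0;
}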
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index e0874cbfefea..30ec235e6935 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -235,7 +235,35 @@ config IPW2200_MONITOR
promiscuous mode via the Wireless Tool's Monitor mode. While in this
mode, no packets can be sent.
-config IPW_QOS
+config IPW2200_RADIOTAP
+ bool "Enable radiotap format 802.11 raw packet support"
+ depends on IPW2200_MONITOR
+
+config IPW2200_PROMISCUOUS
+ bool "Enable creation of a RF radiotap promiscuous interface"
+ depends on IPW2200_MONITOR
+ select IPW2200_RADIOTAP
+ ---help---
+ Enables the creation of a second interface prefixed 'rtap'.
+	  This second interface will provide every received frame in radiotap
+ format.
+
+ This is useful for performing wireless network analysis while
+ maintaining an active association.
+
+ Example usage:
+
+ % modprobe ipw2200 rtap_iface=1
+ % ifconfig rtap0 up
+ % tethereal -i rtap0
+
+ If you do not specify 'rtap_iface=1' as a module parameter then
+ the rtap interface will not be created and you will need to turn
+ it on via sysfs:
+
+ % echo 1 > /sys/bus/pci/drivers/ipw2200/*/rtap_iface
+
+config IPW2200_QOS
bool "Enable QoS support"
depends on IPW2200 && EXPERIMENTAL
@@ -503,6 +531,23 @@ config PRISM54
say M here and read <file:Documentation/modules.txt>. The module
will be called prism54.ko.
+config USB_ZD1201
+ tristate "USB ZD1201 based Wireless device support"
+ depends on USB && NET_RADIO
+ select FW_LOADER
+ ---help---
+ Say Y if you want to use wireless LAN adapters based on the ZyDAS
+ ZD1201 chip.
+
+ This driver makes the adapter appear as a normal Ethernet interface,
+ typically on wlan0.
+
+ The zd1201 device requires external firmware to be loaded.
+ This can be found at http://linux-lc100020.sourceforge.net/
+
+ To compile this driver as a module, choose M here: the
+ module will be called zd1201.
+
source "drivers/net/wireless/hostap/Kconfig"
source "drivers/net/wireless/bcm43xx/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index c86779879361..512603de309a 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -40,3 +40,5 @@ obj-$(CONFIG_BCM43XX) += bcm43xx/
# 16-bit wireless PCMCIA client drivers
obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o
+
+obj-$(CONFIG_USB_ZD1201) += zd1201.o
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 00764ddd74d8..4069b79d8259 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -47,6 +47,7 @@
#include <linux/ioport.h>
#include <linux/pci.h>
#include <asm/uaccess.h>
+#include <net/ieee80211.h>
#include "airo.h"
@@ -467,6 +468,8 @@ static int do8bitIO = 0;
#define RID_ECHOTEST_RESULTS 0xFF71
#define RID_BSSLISTFIRST 0xFF72
#define RID_BSSLISTNEXT 0xFF73
+#define RID_WPA_BSSLISTFIRST 0xFF74
+#define RID_WPA_BSSLISTNEXT 0xFF75
typedef struct {
u16 cmd;
@@ -739,6 +742,14 @@ typedef struct {
u16 extSoftCap;
} CapabilityRid;
+
+/* Only present on firmware >= 5.30.17 */
+typedef struct {
+ u16 unknown[4];
+ u8 fixed[12]; /* WLAN management frame */
+ u8 iep[624];
+} BSSListRidExtra;
+
typedef struct {
u16 len;
u16 index; /* First is 0 and 0xffff means end of list */
@@ -767,6 +778,9 @@ typedef struct {
} fh;
u16 dsChannel;
u16 atimWindow;
+
+ /* Only present on firmware >= 5.30.17 */
+ BSSListRidExtra extra;
} BSSListRid;
typedef struct {
@@ -1140,8 +1154,6 @@ struct airo_info {
char defindex; // Used with auto wep
struct proc_dir_entry *proc_entry;
spinlock_t aux_lock;
- unsigned long flags;
-#define FLAG_PROMISC 8 /* IFF_PROMISC 0x100 - include/linux/if.h */
#define FLAG_RADIO_OFF 0 /* User disabling of MAC */
#define FLAG_RADIO_DOWN 1 /* ifup/ifdown disabling of MAC */
#define FLAG_RADIO_MASK 0x03
@@ -1151,6 +1163,7 @@ struct airo_info {
#define FLAG_UPDATE_MULTI 5
#define FLAG_UPDATE_UNI 6
#define FLAG_802_11 7
+#define FLAG_PROMISC 8 /* IFF_PROMISC 0x100 - include/linux/if.h */
#define FLAG_PENDING_XMIT 9
#define FLAG_PENDING_XMIT11 10
#define FLAG_MPI 11
@@ -1158,17 +1171,19 @@ struct airo_info {
#define FLAG_COMMIT 13
#define FLAG_RESET 14
#define FLAG_FLASHING 15
-#define JOB_MASK 0x2ff0000
-#define JOB_DIE 16
-#define JOB_XMIT 17
-#define JOB_XMIT11 18
-#define JOB_STATS 19
-#define JOB_PROMISC 20
-#define JOB_MIC 21
-#define JOB_EVENT 22
-#define JOB_AUTOWEP 23
-#define JOB_WSTATS 24
-#define JOB_SCAN_RESULTS 25
+#define FLAG_WPA_CAPABLE 16
+ unsigned long flags;
+#define JOB_DIE 0
+#define JOB_XMIT 1
+#define JOB_XMIT11 2
+#define JOB_STATS 3
+#define JOB_PROMISC 4
+#define JOB_MIC 5
+#define JOB_EVENT 6
+#define JOB_AUTOWEP 7
+#define JOB_WSTATS 8
+#define JOB_SCAN_RESULTS 9
+ unsigned long jobs;
int (*bap_read)(struct airo_info*, u16 *pu16Dst, int bytelen,
int whichbap);
unsigned short *flash;
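The point of the hunk above is that the JOB_* bits move out of ai->flags into a dedicated ai->jobs word and are renumbered from 0, so the new FLAG_WPA_CAPABLE can take bit 16 without colliding with the old JOB_DIE, which also sat at bit 16. A small userspace illustration of the aliasing the split avoids, using plain bit operations rather than the kernel's atomic set_bit/test_bit:

#include <stdio.h>

#define OLD_JOB_DIE		16	/* old numbering, shared word */
#define FLAG_WPA_CAPABLE	16	/* new flag wants the same bit */
#define NEW_JOB_DIE		0	/* new numbering, separate 'jobs' word */

int main(void)
{
	unsigned long flags = 0, jobs = 0;

	/* Shared word: setting the WPA flag looks like a pending DIE job. */
	flags |= 1UL << FLAG_WPA_CAPABLE;
	printf("shared word, JOB_DIE seen: %d\n",
	       !!(flags & (1UL << OLD_JOB_DIE)));

	/* Split words: the two bits can no longer alias. */
	jobs |= 1UL << NEW_JOB_DIE;
	printf("split words, WPA flag set: %d, DIE job set: %d\n",
	       !!(flags & (1UL << FLAG_WPA_CAPABLE)),
	       !!(jobs & (1UL << NEW_JOB_DIE)));
	return 0;
}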
@@ -1208,6 +1223,11 @@ struct airo_info {
#define PCI_SHARED_LEN 2*MPI_MAX_FIDS*PKTSIZE+RIDSIZE
char proc_name[IFNAMSIZ];
+ /* WPA-related stuff */
+ unsigned int bssListFirst;
+ unsigned int bssListNext;
+ unsigned int bssListRidLen;
+
struct list_head network_list;
struct list_head network_free_list;
BSSListElement *networks;
@@ -1264,7 +1284,7 @@ static void micinit(struct airo_info *ai)
{
MICRid mic_rid;
- clear_bit(JOB_MIC, &ai->flags);
+ clear_bit(JOB_MIC, &ai->jobs);
PC4500_readrid(ai, RID_MIC, &mic_rid, sizeof(mic_rid), 0);
up(&ai->sem);
@@ -1705,24 +1725,24 @@ static void emmh32_final(emmh32_context *context, u8 digest[4])
static int readBSSListRid(struct airo_info *ai, int first,
BSSListRid *list) {
int rc;
- Cmd cmd;
- Resp rsp;
+ Cmd cmd;
+ Resp rsp;
if (first == 1) {
- if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd=CMD_LISTBSS;
- if (down_interruptible(&ai->sem))
- return -ERESTARTSYS;
- issuecommand(ai, &cmd, &rsp);
- up(&ai->sem);
- /* Let the command take effect */
- ai->task = current;
- ssleep(3);
- ai->task = NULL;
- }
- rc = PC4500_readrid(ai, first ? RID_BSSLISTFIRST : RID_BSSLISTNEXT,
- list, sizeof(*list), 1);
+ if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd=CMD_LISTBSS;
+ if (down_interruptible(&ai->sem))
+ return -ERESTARTSYS;
+ issuecommand(ai, &cmd, &rsp);
+ up(&ai->sem);
+ /* Let the command take effect */
+ ai->task = current;
+ ssleep(3);
+ ai->task = NULL;
+ }
+ rc = PC4500_readrid(ai, first ? ai->bssListFirst : ai->bssListNext,
+ list, ai->bssListRidLen, 1);
list->len = le16_to_cpu(list->len);
list->index = le16_to_cpu(list->index);
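readBSSListRid() now reads through ai->bssListFirst/ai->bssListNext with ai->bssListRidLen, so pre-5.30.17 firmware is only ever asked for the legacy part of BSSListRid and never for the trailing BSSListRidExtra. A hedged userspace sketch of how the two lengths relate; the structure layouts are abbreviated stand-ins, not the driver's full packed definitions:

#include <stdio.h>
#include <stdint.h>

/* Abbreviated stand-ins for the driver's RID structures. */
typedef struct {
	uint16_t unknown[4];
	uint8_t fixed[12];
	uint8_t iep[624];
} BSSListRidExtra;

typedef struct {
	uint16_t len, index;
	uint8_t legacy[100];	/* placeholder for the pre-WPA fields */
	BSSListRidExtra extra;
} BSSListRid;

int main(void)
{
	size_t wpa_len    = sizeof(BSSListRid);
	size_t legacy_len = sizeof(BSSListRid) - sizeof(BSSListRidExtra);

	printf("WPA-capable firmware RID length: %zu\n", wpa_len);
	printf("legacy firmware RID length:      %zu\n", legacy_len);
	return 0;
}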
@@ -2112,7 +2132,7 @@ static void airo_end_xmit(struct net_device *dev) {
int fid = priv->xmit.fid;
u32 *fids = priv->fids;
- clear_bit(JOB_XMIT, &priv->flags);
+ clear_bit(JOB_XMIT, &priv->jobs);
clear_bit(FLAG_PENDING_XMIT, &priv->flags);
status = transmit_802_3_packet (priv, fids[fid], skb->data);
up(&priv->sem);
@@ -2162,7 +2182,7 @@ static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) {
if (down_trylock(&priv->sem) != 0) {
set_bit(FLAG_PENDING_XMIT, &priv->flags);
netif_stop_queue(dev);
- set_bit(JOB_XMIT, &priv->flags);
+ set_bit(JOB_XMIT, &priv->jobs);
wake_up_interruptible(&priv->thr_wait);
} else
airo_end_xmit(dev);
@@ -2177,7 +2197,7 @@ static void airo_end_xmit11(struct net_device *dev) {
int fid = priv->xmit11.fid;
u32 *fids = priv->fids;
- clear_bit(JOB_XMIT11, &priv->flags);
+ clear_bit(JOB_XMIT11, &priv->jobs);
clear_bit(FLAG_PENDING_XMIT11, &priv->flags);
status = transmit_802_11_packet (priv, fids[fid], skb->data);
up(&priv->sem);
@@ -2233,7 +2253,7 @@ static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) {
if (down_trylock(&priv->sem) != 0) {
set_bit(FLAG_PENDING_XMIT11, &priv->flags);
netif_stop_queue(dev);
- set_bit(JOB_XMIT11, &priv->flags);
+ set_bit(JOB_XMIT11, &priv->jobs);
wake_up_interruptible(&priv->thr_wait);
} else
airo_end_xmit11(dev);
@@ -2244,7 +2264,7 @@ static void airo_read_stats(struct airo_info *ai) {
StatsRid stats_rid;
u32 *vals = stats_rid.vals;
- clear_bit(JOB_STATS, &ai->flags);
+ clear_bit(JOB_STATS, &ai->jobs);
if (ai->power.event) {
up(&ai->sem);
return;
@@ -2272,10 +2292,10 @@ static struct net_device_stats *airo_get_stats(struct net_device *dev)
{
struct airo_info *local = dev->priv;
- if (!test_bit(JOB_STATS, &local->flags)) {
+ if (!test_bit(JOB_STATS, &local->jobs)) {
/* Get stats out of the card if available */
if (down_trylock(&local->sem) != 0) {
- set_bit(JOB_STATS, &local->flags);
+ set_bit(JOB_STATS, &local->jobs);
wake_up_interruptible(&local->thr_wait);
} else
airo_read_stats(local);
@@ -2290,7 +2310,7 @@ static void airo_set_promisc(struct airo_info *ai) {
memset(&cmd, 0, sizeof(cmd));
cmd.cmd=CMD_SETMODE;
- clear_bit(JOB_PROMISC, &ai->flags);
+ clear_bit(JOB_PROMISC, &ai->jobs);
cmd.parm0=(ai->flags&IFF_PROMISC) ? PROMISC : NOPROMISC;
issuecommand(ai, &cmd, &rsp);
up(&ai->sem);
@@ -2302,7 +2322,7 @@ static void airo_set_multicast_list(struct net_device *dev) {
if ((dev->flags ^ ai->flags) & IFF_PROMISC) {
change_bit(FLAG_PROMISC, &ai->flags);
if (down_trylock(&ai->sem) != 0) {
- set_bit(JOB_PROMISC, &ai->flags);
+ set_bit(JOB_PROMISC, &ai->jobs);
wake_up_interruptible(&ai->thr_wait);
} else
airo_set_promisc(ai);
@@ -2380,7 +2400,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
}
clear_bit(FLAG_REGISTERED, &ai->flags);
}
- set_bit(JOB_DIE, &ai->flags);
+ set_bit(JOB_DIE, &ai->jobs);
kill_proc(ai->thr_pid, SIGTERM, 1);
wait_for_completion(&ai->thr_exited);
@@ -2701,14 +2721,14 @@ static int reset_card( struct net_device *dev , int lock) {
return 0;
}
-#define MAX_NETWORK_COUNT 64
+#define AIRO_MAX_NETWORK_COUNT 64
static int airo_networks_allocate(struct airo_info *ai)
{
if (ai->networks)
return 0;
ai->networks =
- kzalloc(MAX_NETWORK_COUNT * sizeof(BSSListElement),
+ kzalloc(AIRO_MAX_NETWORK_COUNT * sizeof(BSSListElement),
GFP_KERNEL);
if (!ai->networks) {
airo_print_warn(ai->dev->name, "Out of memory allocating beacons");
@@ -2732,11 +2752,33 @@ static void airo_networks_initialize(struct airo_info *ai)
INIT_LIST_HEAD(&ai->network_free_list);
INIT_LIST_HEAD(&ai->network_list);
- for (i = 0; i < MAX_NETWORK_COUNT; i++)
+ for (i = 0; i < AIRO_MAX_NETWORK_COUNT; i++)
list_add_tail(&ai->networks[i].list,
&ai->network_free_list);
}
+static int airo_test_wpa_capable(struct airo_info *ai)
+{
+ int status;
+ CapabilityRid cap_rid;
+ const char *name = ai->dev->name;
+
+ status = readCapabilityRid(ai, &cap_rid, 1);
+ if (status != SUCCESS) return 0;
+
+ /* Only firmware versions 5.30.17 or better can do WPA */
+ if ((cap_rid.softVer > 0x530)
+ || ((cap_rid.softVer == 0x530) && (cap_rid.softSubVer >= 17))) {
+ airo_print_info(name, "WPA is supported.");
+ return 1;
+ }
+
+ /* No WPA support */
+ airo_print_info(name, "WPA unsupported (only firmware versions 5.30.17"
+ " and greater support WPA. Detected %s)", cap_rid.prodVer);
+ return 0;
+}
+
static struct net_device *_init_airo_card( unsigned short irq, int port,
int is_pcmcia, struct pci_dev *pci,
struct device *dmdev )
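airo_test_wpa_capable() gates WPA on firmware 5.30.17 or newer by comparing the major version word (softVer) and the sub-version. The same comparison as a standalone sketch, with the field names borrowed from CapabilityRid and the values passed in directly:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the check above: major above 0x530, or exactly 0x530 with sub >= 17. */
static int wpa_capable(uint16_t softVer, uint16_t softSubVer)
{
	return (softVer > 0x530) ||
	       ((softVer == 0x530) && (softSubVer >= 17));
}

int main(void)
{
	printf("5.30.17 -> %d\n", wpa_capable(0x530, 17));	/* 1 */
	printf("5.30.16 -> %d\n", wpa_capable(0x530, 16));	/* 0 */
	printf("5.40.01 -> %d\n", wpa_capable(0x540, 1));	/* 1 */
	return 0;
}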
@@ -2759,6 +2801,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
ai = dev->priv;
ai->wifidev = NULL;
ai->flags = 0;
+ ai->jobs = 0;
ai->dev = dev;
if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) {
airo_print_dbg(dev->name, "Found an MPI350 card");
@@ -2838,6 +2881,18 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
set_bit(FLAG_FLASHING, &ai->flags);
}
+ /* Test for WPA support */
+ if (airo_test_wpa_capable(ai)) {
+ set_bit(FLAG_WPA_CAPABLE, &ai->flags);
+ ai->bssListFirst = RID_WPA_BSSLISTFIRST;
+ ai->bssListNext = RID_WPA_BSSLISTNEXT;
+ ai->bssListRidLen = sizeof(BSSListRid);
+ } else {
+ ai->bssListFirst = RID_BSSLISTFIRST;
+ ai->bssListNext = RID_BSSLISTNEXT;
+ ai->bssListRidLen = sizeof(BSSListRid) - sizeof(BSSListRidExtra);
+ }
+
rc = register_netdev(dev);
if (rc) {
airo_print_err(dev->name, "Couldn't register_netdev");
@@ -2875,7 +2930,7 @@ err_out_irq:
err_out_unlink:
del_airo_dev(dev);
err_out_thr:
- set_bit(JOB_DIE, &ai->flags);
+ set_bit(JOB_DIE, &ai->jobs);
kill_proc(ai->thr_pid, SIGTERM, 1);
wait_for_completion(&ai->thr_exited);
err_out_free:
@@ -2933,7 +2988,7 @@ static void airo_send_event(struct net_device *dev) {
union iwreq_data wrqu;
StatusRid status_rid;
- clear_bit(JOB_EVENT, &ai->flags);
+ clear_bit(JOB_EVENT, &ai->jobs);
PC4500_readrid(ai, RID_STATUS, &status_rid, sizeof(status_rid), 0);
up(&ai->sem);
wrqu.data.length = 0;
@@ -2947,7 +3002,7 @@ static void airo_send_event(struct net_device *dev) {
static void airo_process_scan_results (struct airo_info *ai) {
union iwreq_data wrqu;
- BSSListRid BSSList;
+ BSSListRid bss;
int rc;
BSSListElement * loop_net;
BSSListElement * tmp_net;
@@ -2960,15 +3015,15 @@ static void airo_process_scan_results (struct airo_info *ai) {
}
/* Try to read the first entry of the scan result */
- rc = PC4500_readrid(ai, RID_BSSLISTFIRST, &BSSList, sizeof(BSSList), 0);
- if((rc) || (BSSList.index == 0xffff)) {
+ rc = PC4500_readrid(ai, ai->bssListFirst, &bss, ai->bssListRidLen, 0);
+ if((rc) || (bss.index == 0xffff)) {
/* No scan results */
goto out;
}
/* Read and parse all entries */
tmp_net = NULL;
- while((!rc) && (BSSList.index != 0xffff)) {
+ while((!rc) && (bss.index != 0xffff)) {
/* Grab a network off the free list */
if (!list_empty(&ai->network_free_list)) {
tmp_net = list_entry(ai->network_free_list.next,
@@ -2977,19 +3032,19 @@ static void airo_process_scan_results (struct airo_info *ai) {
}
if (tmp_net != NULL) {
- memcpy(tmp_net, &BSSList, sizeof(tmp_net->bss));
+ memcpy(tmp_net, &bss, sizeof(tmp_net->bss));
list_add_tail(&tmp_net->list, &ai->network_list);
tmp_net = NULL;
}
/* Read next entry */
- rc = PC4500_readrid(ai, RID_BSSLISTNEXT,
- &BSSList, sizeof(BSSList), 0);
+ rc = PC4500_readrid(ai, ai->bssListNext,
+ &bss, ai->bssListRidLen, 0);
}
out:
ai->scan_timeout = 0;
- clear_bit(JOB_SCAN_RESULTS, &ai->flags);
+ clear_bit(JOB_SCAN_RESULTS, &ai->jobs);
up(&ai->sem);
/* Send an empty event to user space.
@@ -3019,10 +3074,10 @@ static int airo_thread(void *data) {
/* make swsusp happy with our thread */
try_to_freeze();
- if (test_bit(JOB_DIE, &ai->flags))
+ if (test_bit(JOB_DIE, &ai->jobs))
break;
- if (ai->flags & JOB_MASK) {
+ if (ai->jobs) {
locked = down_interruptible(&ai->sem);
} else {
wait_queue_t wait;
@@ -3031,16 +3086,16 @@ static int airo_thread(void *data) {
add_wait_queue(&ai->thr_wait, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- if (ai->flags & JOB_MASK)
+ if (ai->jobs)
break;
if (ai->expires || ai->scan_timeout) {
if (ai->scan_timeout &&
time_after_eq(jiffies,ai->scan_timeout)){
- set_bit(JOB_SCAN_RESULTS,&ai->flags);
+ set_bit(JOB_SCAN_RESULTS, &ai->jobs);
break;
} else if (ai->expires &&
time_after_eq(jiffies,ai->expires)){
- set_bit(JOB_AUTOWEP,&ai->flags);
+ set_bit(JOB_AUTOWEP, &ai->jobs);
break;
}
if (!signal_pending(current)) {
@@ -3069,7 +3124,7 @@ static int airo_thread(void *data) {
if (locked)
continue;
- if (test_bit(JOB_DIE, &ai->flags)) {
+ if (test_bit(JOB_DIE, &ai->jobs)) {
up(&ai->sem);
break;
}
@@ -3079,23 +3134,23 @@ static int airo_thread(void *data) {
continue;
}
- if (test_bit(JOB_XMIT, &ai->flags))
+ if (test_bit(JOB_XMIT, &ai->jobs))
airo_end_xmit(dev);
- else if (test_bit(JOB_XMIT11, &ai->flags))
+ else if (test_bit(JOB_XMIT11, &ai->jobs))
airo_end_xmit11(dev);
- else if (test_bit(JOB_STATS, &ai->flags))
+ else if (test_bit(JOB_STATS, &ai->jobs))
airo_read_stats(ai);
- else if (test_bit(JOB_WSTATS, &ai->flags))
+ else if (test_bit(JOB_WSTATS, &ai->jobs))
airo_read_wireless_stats(ai);
- else if (test_bit(JOB_PROMISC, &ai->flags))
+ else if (test_bit(JOB_PROMISC, &ai->jobs))
airo_set_promisc(ai);
- else if (test_bit(JOB_MIC, &ai->flags))
+ else if (test_bit(JOB_MIC, &ai->jobs))
micinit(ai);
- else if (test_bit(JOB_EVENT, &ai->flags))
+ else if (test_bit(JOB_EVENT, &ai->jobs))
airo_send_event(dev);
- else if (test_bit(JOB_AUTOWEP, &ai->flags))
+ else if (test_bit(JOB_AUTOWEP, &ai->jobs))
timer_func(dev);
- else if (test_bit(JOB_SCAN_RESULTS, &ai->flags))
+ else if (test_bit(JOB_SCAN_RESULTS, &ai->jobs))
airo_process_scan_results(ai);
else /* Shouldn't get here, but we make sure to unlock */
up(&ai->sem);
@@ -3133,7 +3188,7 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
if ( status & EV_MIC ) {
OUT4500( apriv, EVACK, EV_MIC );
if (test_bit(FLAG_MIC_CAPABLE, &apriv->flags)) {
- set_bit(JOB_MIC, &apriv->flags);
+ set_bit(JOB_MIC, &apriv->jobs);
wake_up_interruptible(&apriv->thr_wait);
}
}
@@ -3187,7 +3242,7 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
set_bit(FLAG_UPDATE_MULTI, &apriv->flags);
if (down_trylock(&apriv->sem) != 0) {
- set_bit(JOB_EVENT, &apriv->flags);
+ set_bit(JOB_EVENT, &apriv->jobs);
wake_up_interruptible(&apriv->thr_wait);
} else
airo_send_event(dev);
@@ -5485,7 +5540,7 @@ static void timer_func( struct net_device *dev ) {
up(&apriv->sem);
/* Schedule check to see if the change worked */
- clear_bit(JOB_AUTOWEP, &apriv->flags);
+ clear_bit(JOB_AUTOWEP, &apriv->jobs);
apriv->expires = RUN_AT(HZ*3);
}
@@ -6876,7 +6931,7 @@ static int airo_get_range(struct net_device *dev,
}
range->num_txpower = i;
range->txpower_capa = IW_TXPOW_MWATT;
- range->we_version_source = 12;
+ range->we_version_source = 19;
range->we_version_compiled = WIRELESS_EXT;
range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
range->retry_flags = IW_RETRY_LIMIT;
@@ -7152,6 +7207,7 @@ static inline char *airo_translate_scan(struct net_device *dev,
u16 capabilities;
char * current_val; /* For rates */
int i;
+ char * buf;
/* First entry *MUST* be the AP MAC address */
iwe.cmd = SIOCGIWAP;
@@ -7238,8 +7294,69 @@ static inline char *airo_translate_scan(struct net_device *dev,
if((current_val - current_ev) > IW_EV_LCP_LEN)
current_ev = current_val;
- /* The other data in the scan result are not really
- * interesting, so for now drop it - Jean II */
+ /* Beacon interval */
+ buf = kmalloc(30, GFP_KERNEL);
+ if (buf) {
+ iwe.cmd = IWEVCUSTOM;
+ sprintf(buf, "bcn_int=%d", bss->beaconInterval);
+ iwe.u.data.length = strlen(buf);
+ current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, buf);
+ kfree(buf);
+ }
+
+ /* Put WPA/RSN Information Elements into the event stream */
+ if (test_bit(FLAG_WPA_CAPABLE, &ai->flags)) {
+ unsigned int num_null_ies = 0;
+ u16 length = sizeof (bss->extra.iep);
+ struct ieee80211_info_element *info_element =
+ (struct ieee80211_info_element *) &bss->extra.iep;
+
+ while ((length >= sizeof(*info_element)) && (num_null_ies < 2)) {
+ if (sizeof(*info_element) + info_element->len > length) {
+ /* Invalid element, don't continue parsing IE */
+ break;
+ }
+
+ switch (info_element->id) {
+ case MFIE_TYPE_SSID:
+ /* Two zero-length SSID elements
+ * mean we're done parsing elements */
+ if (!info_element->len)
+ num_null_ies++;
+ break;
+
+ case MFIE_TYPE_GENERIC:
+ if (info_element->len >= 4 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0x50 &&
+ info_element->data[2] == 0xf2 &&
+ info_element->data[3] == 0x01) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = min(info_element->len + 2,
+ MAX_WPA_IE_LEN);
+ current_ev = iwe_stream_add_point(current_ev, end_buf,
+ &iwe, (char *) info_element);
+ }
+ break;
+
+ case MFIE_TYPE_RSN:
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = min(info_element->len + 2,
+ MAX_WPA_IE_LEN);
+ current_ev = iwe_stream_add_point(current_ev, end_buf,
+ &iwe, (char *) info_element);
+ break;
+
+ default:
+ break;
+ }
+
+ length -= sizeof(*info_element) + info_element->len;
+ info_element =
+ (struct ieee80211_info_element *)&info_element->
+ data[info_element->len];
+ }
+ }
return current_ev;
}
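The loop added above walks the raw information-element blob: each element is an id byte, a length byte, then that many data bytes, and the walk stops on a truncated element or after two empty SSID elements. A self-contained userspace version of the same TLV walk, with the WPA vendor-OUI check (00:50:F2, type 1) spelled out; the buffer contents are made up for the demo:

#include <stdio.h>
#include <stdint.h>

struct info_element { uint8_t id; uint8_t len; uint8_t data[]; };

static void walk_ies(const uint8_t *buf, size_t length)
{
	while (length >= 2) {
		const struct info_element *ie = (const struct info_element *)buf;

		if ((size_t)2 + ie->len > length)
			break;			/* truncated element, stop parsing */

		if (ie->id == 0xdd && ie->len >= 4 &&
		    ie->data[0] == 0x00 && ie->data[1] == 0x50 &&
		    ie->data[2] == 0xf2 && ie->data[3] == 0x01)
			printf("WPA IE, %u bytes\n", ie->len);
		else if (ie->id == 0x30)
			printf("RSN IE, %u bytes\n", ie->len);
		else
			printf("IE %u, %u bytes\n", ie->id, ie->len);

		buf    += 2 + ie->len;
		length -= 2 + ie->len;
	}
}

int main(void)
{
	/* SSID "ap", then a minimal vendor-specific element carrying the WPA OUI. */
	const uint8_t ies[] = { 0x00, 0x02, 'a', 'p',
				0xdd, 0x04, 0x00, 0x50, 0xf2, 0x01 };
	walk_ies(ies, sizeof(ies));
	return 0;
}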
@@ -7521,7 +7638,7 @@ static void airo_read_wireless_stats(struct airo_info *local)
u32 *vals = stats_rid.vals;
/* Get stats out of the card */
- clear_bit(JOB_WSTATS, &local->flags);
+ clear_bit(JOB_WSTATS, &local->jobs);
if (local->power.event) {
up(&local->sem);
return;
@@ -7565,10 +7682,10 @@ static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev)
{
struct airo_info *local = dev->priv;
- if (!test_bit(JOB_WSTATS, &local->flags)) {
+ if (!test_bit(JOB_WSTATS, &local->jobs)) {
/* Get stats out of the card if available */
if (down_trylock(&local->sem) != 0) {
- set_bit(JOB_WSTATS, &local->flags);
+ set_bit(JOB_WSTATS, &local->jobs);
wake_up_interruptible(&local->thr_wait);
} else
airo_read_wireless_stats(local);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index 2e83083935e1..e66fdb1f3cfd 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -645,7 +645,6 @@ struct bcm43xx_private {
unsigned int irq;
void __iomem *mmio_addr;
- unsigned int mmio_len;
/* Do not use the lock directly. Use the bcm43xx_lock* helper
* functions, to be MMIO-safe. */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
index 35a4fcb6d923..7497fb16076e 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
@@ -92,7 +92,7 @@ static ssize_t devinfo_read_file(struct file *file, char __user *userbuf,
fappend("subsystem_vendor: 0x%04x subsystem_device: 0x%04x\n",
pci_dev->subsystem_vendor, pci_dev->subsystem_device);
fappend("IRQ: %d\n", bcm->irq);
- fappend("mmio_addr: 0x%p mmio_len: %u\n", bcm->mmio_addr, bcm->mmio_len);
+ fappend("mmio_addr: 0x%p\n", bcm->mmio_addr);
fappend("chip_id: 0x%04x chip_rev: 0x%02x\n", bcm->chip_id, bcm->chip_rev);
if ((bcm->core_80211[0].rev >= 3) && (bcm43xx_read32(bcm, 0x0158) & (1 << 16)))
fappend("Radio disabled by hardware!\n");
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
index bbecba02e697..d0318e525ba7 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
@@ -624,25 +624,28 @@ err_destroy_tx0:
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
int slot)
{
- u16 cookie = 0x0000;
+ u16 cookie = 0xF000;
/* Use the upper 4 bits of the cookie as
* DMA controller ID and store the slot number
- * in the lower 12 bits
+ * in the lower 12 bits.
+ * Note that the cookie must never be 0, as this
+ * is a special value used in RX path.
*/
switch (ring->mmio_base) {
default:
assert(0);
case BCM43xx_MMIO_DMA1_BASE:
+ cookie = 0xA000;
break;
case BCM43xx_MMIO_DMA2_BASE:
- cookie = 0x1000;
+ cookie = 0xB000;
break;
case BCM43xx_MMIO_DMA3_BASE:
- cookie = 0x2000;
+ cookie = 0xC000;
break;
case BCM43xx_MMIO_DMA4_BASE:
- cookie = 0x3000;
+ cookie = 0xD000;
break;
}
assert(((u16)slot & 0xF000) == 0x0000);
@@ -660,16 +663,16 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
struct bcm43xx_dmaring *ring = NULL;
switch (cookie & 0xF000) {
- case 0x0000:
+ case 0xA000:
ring = dma->tx_ring0;
break;
- case 0x1000:
+ case 0xB000:
ring = dma->tx_ring1;
break;
- case 0x2000:
+ case 0xC000:
ring = dma->tx_ring2;
break;
- case 0x3000:
+ case 0xD000:
ring = dma->tx_ring3;
break;
default:
@@ -839,8 +842,18 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
/* We received an xmit status. */
struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
struct bcm43xx_xmitstatus stat;
+ int i = 0;
stat.cookie = le16_to_cpu(hw->cookie);
+ while (stat.cookie == 0) {
+ if (unlikely(++i >= 10000)) {
+ assert(0);
+ break;
+ }
+ udelay(2);
+ barrier();
+ stat.cookie = le16_to_cpu(hw->cookie);
+ }
stat.flags = hw->flags;
stat.cnt1 = hw->cnt1;
stat.cnt2 = hw->cnt2;
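The cookie rework above reserves 0x0000: each TX ring now contributes a non-zero controller nibble (0xA-0xD) in the top four bits, the slot number sits in the low twelve, and the RX path spins briefly if the hardware has not yet filled the cookie in. A compact userspace model of the encode/decode, with the ring identity reduced to an index instead of the driver's MMIO-base switch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* ring 0..3 -> controller nibble 0xA..0xD, slot in the low 12 bits */
static uint16_t generate_cookie(int ring, int slot)
{
	assert(ring >= 0 && ring <= 3);
	assert((slot & ~0x0FFF) == 0);
	return (uint16_t)(((0xA + ring) << 12) | slot);
}

static void parse_cookie(uint16_t cookie, int *ring, int *slot)
{
	*ring = (cookie >> 12) - 0xA;
	*slot = cookie & 0x0FFF;
}

int main(void)
{
	int ring, slot;
	uint16_t c = generate_cookie(2, 37);

	parse_cookie(c, &ring, &slot);
	printf("cookie 0x%04x -> ring %d, slot %d (never zero: %d)\n",
	       c, ring, slot, c != 0);
	return 0;
}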
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 7ed18cad29f7..736dde96c4a3 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -128,13 +128,15 @@ MODULE_PARM_DESC(fwpostfix, "Postfix for .fw files. Useful for debugging.");
static struct pci_device_id bcm43xx_pci_tbl[] = {
/* Broadcom 4303 802.11b */
{ PCI_VENDOR_ID_BROADCOM, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- /* Broadcom 4307 802.11b */
+ /* Broadcom 4307 802.11b */
{ PCI_VENDOR_ID_BROADCOM, 0x4307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- /* Broadcom 4318 802.11b/g */
+ /* Broadcom 4318 802.11b/g */
{ PCI_VENDOR_ID_BROADCOM, 0x4318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ /* Broadcom 4319 802.11a/b/g */
+ { PCI_VENDOR_ID_BROADCOM, 0x4319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
/* Broadcom 4306 802.11b/g */
{ PCI_VENDOR_ID_BROADCOM, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- /* Broadcom 4306 802.11a */
+ /* Broadcom 4306 802.11a */
// { PCI_VENDOR_ID_BROADCOM, 0x4321, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
/* Broadcom 4309 802.11a/b/g */
{ PCI_VENDOR_ID_BROADCOM, 0x4324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
@@ -3299,8 +3301,7 @@ static void bcm43xx_detach_board(struct bcm43xx_private *bcm)
bcm43xx_chipset_detach(bcm);
/* Do _not_ access the chip, after it is detached. */
- iounmap(bcm->mmio_addr);
-
+ pci_iounmap(pci_dev, bcm->mmio_addr);
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
@@ -3390,40 +3391,26 @@ static int bcm43xx_attach_board(struct bcm43xx_private *bcm)
struct net_device *net_dev = bcm->net_dev;
int err;
int i;
- unsigned long mmio_start, mmio_flags, mmio_len;
u32 coremask;
err = pci_enable_device(pci_dev);
if (err) {
- printk(KERN_ERR PFX "unable to wake up pci device (%i)\n", err);
+ printk(KERN_ERR PFX "pci_enable_device() failed\n");
goto out;
}
- mmio_start = pci_resource_start(pci_dev, 0);
- mmio_flags = pci_resource_flags(pci_dev, 0);
- mmio_len = pci_resource_len(pci_dev, 0);
- if (!(mmio_flags & IORESOURCE_MEM)) {
- printk(KERN_ERR PFX
- "%s, region #0 not an MMIO resource, aborting\n",
- pci_name(pci_dev));
- err = -ENODEV;
- goto err_pci_disable;
- }
err = pci_request_regions(pci_dev, KBUILD_MODNAME);
if (err) {
- printk(KERN_ERR PFX
- "could not access PCI resources (%i)\n", err);
+ printk(KERN_ERR PFX "pci_request_regions() failed\n");
goto err_pci_disable;
}
/* enable PCI bus-mastering */
pci_set_master(pci_dev);
- bcm->mmio_addr = ioremap(mmio_start, mmio_len);
+ bcm->mmio_addr = pci_iomap(pci_dev, 0, ~0UL);
if (!bcm->mmio_addr) {
- printk(KERN_ERR PFX "%s: cannot remap MMIO, aborting\n",
- pci_name(pci_dev));
+ printk(KERN_ERR PFX "pci_iomap() failed\n");
err = -EIO;
goto err_pci_release;
}
- bcm->mmio_len = mmio_len;
net_dev->base_addr = (unsigned long)bcm->mmio_addr;
bcm43xx_pci_read_config16(bcm, PCI_SUBSYSTEM_VENDOR_ID,
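Replacing the open-coded resource checks with pci_iomap()/pci_iounmap() folds the flags and length bookkeeping into the PCI core. A rough probe-path sketch of that pattern under the usual kernel headers; the labels and names here are generic, not bcm43xx's:

#include <linux/pci.h>

static void __iomem *map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs;

	if (pci_enable_device(pdev))
		return NULL;
	if (pci_request_regions(pdev, "example"))
		goto err_disable;

	pci_set_master(pdev);

	regs = pci_iomap(pdev, 0, 0);	/* maxlen 0: map the whole BAR */
	if (!regs)
		goto err_release;
	return regs;

err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return NULL;
}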
@@ -3517,7 +3504,7 @@ err_80211_unwind:
err_chipset_detach:
bcm43xx_chipset_detach(bcm);
err_iounmap:
- iounmap(bcm->mmio_addr);
+ pci_iounmap(pci_dev, bcm->mmio_addr);
err_pci_release:
pci_release_regions(pci_dev);
err_pci_disable:
@@ -3568,7 +3555,7 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
unsigned long flags;
int keyidx;
- dprintk(KERN_INFO PFX "set security called\n");
+ dprintk(KERN_INFO PFX "set security called");
bcm43xx_lock_mmio(bcm, flags);
@@ -3581,24 +3568,25 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
if (sec->flags & SEC_ACTIVE_KEY) {
secinfo->active_key = sec->active_key;
- dprintk(KERN_INFO PFX " .active_key = %d\n", sec->active_key);
+ dprintk(", .active_key = %d", sec->active_key);
}
if (sec->flags & SEC_UNICAST_GROUP) {
secinfo->unicast_uses_group = sec->unicast_uses_group;
- dprintk(KERN_INFO PFX " .unicast_uses_group = %d\n", sec->unicast_uses_group);
+ dprintk(", .unicast_uses_group = %d", sec->unicast_uses_group);
}
if (sec->flags & SEC_LEVEL) {
secinfo->level = sec->level;
- dprintk(KERN_INFO PFX " .level = %d\n", sec->level);
+ dprintk(", .level = %d", sec->level);
}
if (sec->flags & SEC_ENABLED) {
secinfo->enabled = sec->enabled;
- dprintk(KERN_INFO PFX " .enabled = %d\n", sec->enabled);
+ dprintk(", .enabled = %d", sec->enabled);
}
if (sec->flags & SEC_ENCRYPT) {
secinfo->encrypt = sec->encrypt;
- dprintk(KERN_INFO PFX " .encrypt = %d\n", sec->encrypt);
+ dprintk(", .encrypt = %d", sec->encrypt);
}
+ dprintk("\n");
if (bcm->initialized && !bcm->ieee->host_encrypt) {
if (secinfo->enabled) {
/* upload WEP keys to hardware */
diff --git a/drivers/net/wireless/hermes.c b/drivers/net/wireless/hermes.c
index 346c6febb033..2aa2f389c0d5 100644
--- a/drivers/net/wireless/hermes.c
+++ b/drivers/net/wireless/hermes.c
@@ -121,12 +121,6 @@ void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing)
hw->iobase = address;
hw->reg_spacing = reg_spacing;
hw->inten = 0x0;
-
-#ifdef HERMES_DEBUG_BUFFER
- hw->dbufp = 0;
- memset(&hw->dbuf, 0xff, sizeof(hw->dbuf));
- memset(&hw->profile, 0, sizeof(hw->profile));
-#endif
}
int hermes_init(hermes_t *hw)
@@ -347,19 +341,6 @@ static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
reg = hermes_read_reg(hw, oreg);
}
-#ifdef HERMES_DEBUG_BUFFER
- hw->profile[HERMES_BAP_BUSY_TIMEOUT - k]++;
-
- if (k < HERMES_BAP_BUSY_TIMEOUT) {
- struct hermes_debug_entry *e =
- &hw->dbuf[(hw->dbufp++) % HERMES_DEBUG_BUFSIZE];
- e->bap = bap;
- e->id = id;
- e->offset = offset;
- e->cycles = HERMES_BAP_BUSY_TIMEOUT - k;
- }
-#endif
-
if (reg & HERMES_OFFSET_BUSY)
return -ETIMEDOUT;
@@ -419,8 +400,7 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
}
/* Write a block of data to the chip's buffer, via the
- * BAP. Synchronization/serialization is the caller's problem. len
- * must be even.
+ * BAP. Synchronization/serialization is the caller's problem.
*
* Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware
*/
@@ -430,7 +410,7 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
int err = 0;
- if ( (len < 0) || (len % 2) )
+ if (len < 0)
return -EINVAL;
err = hermes_bap_seek(hw, bap, id, offset);
@@ -438,49 +418,12 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
goto out;
/* Actually do the transfer */
- hermes_write_words(hw, dreg, buf, len/2);
+ hermes_write_bytes(hw, dreg, buf, len);
out:
return err;
}
-/* Write a block of data to the chip's buffer with padding if
- * neccessary, via the BAP. Synchronization/serialization is the
- * caller's problem. len must be even.
- *
- * Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware
- */
-int hermes_bap_pwrite_pad(hermes_t *hw, int bap, const void *buf, unsigned data_len, int len,
- u16 id, u16 offset)
-{
- int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
- int err = 0;
-
- if (len < 0 || len % 2 || data_len > len)
- return -EINVAL;
-
- err = hermes_bap_seek(hw, bap, id, offset);
- if (err)
- goto out;
-
- /* Transfer all the complete words of data */
- hermes_write_words(hw, dreg, buf, data_len/2);
- /* If there is an odd byte left over pad and transfer it */
- if (data_len & 1) {
- u8 end[2];
- end[1] = 0;
- end[0] = ((unsigned char *)buf)[data_len - 1];
- hermes_write_words(hw, dreg, end, 1);
- data_len ++;
- }
- /* Now send zeros for the padding */
- if (data_len < len)
- hermes_clear_words(hw, dreg, (len - data_len) / 2);
- /* Complete */
- out:
- return err;
-}
-
/* Read a Length-Type-Value record from the card.
*
* If length is NULL, we ignore the length read from the card, and
@@ -553,7 +496,7 @@ int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
count = length - 1;
- hermes_write_words(hw, dreg, value, count);
+ hermes_write_bytes(hw, dreg, value, count << 1);
err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE,
rid, NULL);
@@ -568,7 +511,6 @@ EXPORT_SYMBOL(hermes_allocate);
EXPORT_SYMBOL(hermes_bap_pread);
EXPORT_SYMBOL(hermes_bap_pwrite);
-EXPORT_SYMBOL(hermes_bap_pwrite_pad);
EXPORT_SYMBOL(hermes_read_ltv);
EXPORT_SYMBOL(hermes_write_ltv);
diff --git a/drivers/net/wireless/hermes.h b/drivers/net/wireless/hermes.h
index 7644f72a9f4e..8e3f0e3edb58 100644
--- a/drivers/net/wireless/hermes.h
+++ b/drivers/net/wireless/hermes.h
@@ -328,16 +328,6 @@ struct hermes_multicast {
u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN];
} __attribute__ ((packed));
-// #define HERMES_DEBUG_BUFFER 1
-#define HERMES_DEBUG_BUFSIZE 4096
-struct hermes_debug_entry {
- int bap;
- u16 id, offset;
- int cycles;
-};
-
-#ifdef __KERNEL__
-
/* Timeouts */
#define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */
@@ -347,14 +337,7 @@ typedef struct hermes {
int reg_spacing;
#define HERMES_16BIT_REGSPACING 0
#define HERMES_32BIT_REGSPACING 1
-
u16 inten; /* Which interrupts should be enabled? */
-
-#ifdef HERMES_DEBUG_BUFFER
- struct hermes_debug_entry dbuf[HERMES_DEBUG_BUFSIZE];
- unsigned long dbufp;
- unsigned long profile[HERMES_BAP_BUSY_TIMEOUT+1];
-#endif
} hermes_t;
/* Register access convenience macros */
@@ -376,8 +359,6 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
u16 id, u16 offset);
int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
u16 id, u16 offset);
-int hermes_bap_pwrite_pad(hermes_t *hw, int bap, const void *buf,
- unsigned data_len, int len, u16 id, u16 offset);
int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen,
u16 *length, void *buf);
int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
@@ -425,10 +406,13 @@ static inline void hermes_read_words(struct hermes *hw, int off, void *buf, unsi
ioread16_rep(hw->iobase + off, buf, count);
}
-static inline void hermes_write_words(struct hermes *hw, int off, const void *buf, unsigned count)
+static inline void hermes_write_bytes(struct hermes *hw, int off,
+ const char *buf, unsigned count)
{
off = off << hw->reg_spacing;
- iowrite16_rep(hw->iobase + off, buf, count);
+ iowrite16_rep(hw->iobase + off, buf, count >> 1);
+ if (unlikely(count & 1))
+ iowrite8(buf[count - 1], hw->iobase + off);
}
static inline void hermes_clear_words(struct hermes *hw, int off, unsigned count)
@@ -462,21 +446,4 @@ static inline int hermes_write_wordrec(hermes_t *hw, int bap, u16 rid, u16 word)
return HERMES_WRITE_RECORD(hw, bap, rid, &rec);
}
-#else /* ! __KERNEL__ */
-
-/* These are provided for the benefit of userspace drivers and testing programs
- which use ioperm() or iopl() */
-
-#define hermes_read_reg(base, off) (inw((base) + (off)))
-#define hermes_write_reg(base, off, val) (outw((val), (base) + (off)))
-
-#define hermes_read_regn(base, name) (hermes_read_reg((base), HERMES_##name))
-#define hermes_write_regn(base, name, val) (hermes_write_reg((base), HERMES_##name, (val)))
-
-/* Note that for the next two, the count is in 16-bit words, not bytes */
-#define hermes_read_data(base, off, buf, count) (insw((base) + (off), (buf), (count)))
-#define hermes_write_data(base, off, buf, count) (outsw((base) + (off), (buf), (count)))
-
-#endif /* ! __KERNEL__ */
-
#endif /* _HERMES_H */
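With the even-length check dropped from hermes_bap_pwrite() and hermes_bap_pwrite_pad() gone, odd trailing bytes are now absorbed by hermes_write_bytes() above (count >> 1 word writes plus one final iowrite8()), so callers may pass unpadded lengths. An illustrative caller sketch, not part of the diff, using only names that appear in these hunks (orinoco_xmit() further down is the real caller):

/* Sketch only: push an arbitrary-length Tx payload to the BAP.
 * Assumes hw and txfid are set up by the driver as usual. */
static int write_tx_payload(hermes_t *hw, u16 txfid, const void *buf, int len)
{
	int err;

	/* No ALIGN(len, 2) and no _pad variant needed any more: an odd
	 * trailing byte becomes a single 8-bit write in hermes_write_bytes(). */
	err = hermes_bap_pwrite(hw, USER_BAP, buf, len, txfid, HERMES_802_3_OFFSET);
	if (err)
		printk(KERN_ERR "hermes: Tx write failed (err %d)\n", err);
	return err;	/* < 0 errno, 0 success, > 0 firmware error */
}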
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 06a5214145e3..4a5be70c0419 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -534,5 +534,4 @@ int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
-EXPORT_SYMBOL(hostap_dump_tx_80211);
EXPORT_SYMBOL(hostap_master_start_xmit);
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 06c3fa32b310..ba13125024cb 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -3276,17 +3276,6 @@ EXPORT_SYMBOL(hostap_init_data);
EXPORT_SYMBOL(hostap_init_ap_proc);
EXPORT_SYMBOL(hostap_free_data);
EXPORT_SYMBOL(hostap_check_sta_fw_version);
-EXPORT_SYMBOL(hostap_handle_sta_tx);
-EXPORT_SYMBOL(hostap_handle_sta_release);
EXPORT_SYMBOL(hostap_handle_sta_tx_exc);
-EXPORT_SYMBOL(hostap_update_sta_ps);
-EXPORT_SYMBOL(hostap_handle_sta_rx);
-EXPORT_SYMBOL(hostap_is_sta_assoc);
-EXPORT_SYMBOL(hostap_is_sta_authorized);
-EXPORT_SYMBOL(hostap_add_sta);
-EXPORT_SYMBOL(hostap_update_rates);
-EXPORT_SYMBOL(hostap_add_wds_links);
-EXPORT_SYMBOL(hostap_wds_link_oper);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-EXPORT_SYMBOL(hostap_deauth_all_stas);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 55bed923fbe9..db03dc2646df 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -881,6 +881,12 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12(
"ZoomAir 11Mbps High", "Rate wireless Networking",
0x273fe3db, 0x32a1eaee),
+ PCMCIA_DEVICE_PROD_ID123(
+ "Pretec", "CompactWLAN Card 802.11b", "2.5",
+ 0x1cadd3e5, 0xe697636c, 0x7a5bfcf1),
+ PCMCIA_DEVICE_PROD_ID123(
+ "U.S. Robotics", "IEEE 802.11b PC-CARD", "Version 01.02",
+ 0xc7b8df9d, 0x1700d087, 0x4b74baa0),
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids);
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 8dd4c4446a64..93786f4218f0 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -1125,11 +1125,9 @@ EXPORT_SYMBOL(hostap_set_auth_algs);
EXPORT_SYMBOL(hostap_dump_rx_header);
EXPORT_SYMBOL(hostap_dump_tx_header);
EXPORT_SYMBOL(hostap_80211_header_parse);
-EXPORT_SYMBOL(hostap_80211_prism_header_parse);
EXPORT_SYMBOL(hostap_80211_get_hdrlen);
EXPORT_SYMBOL(hostap_get_stats);
EXPORT_SYMBOL(hostap_setup_dev);
-EXPORT_SYMBOL(hostap_proc);
EXPORT_SYMBOL(hostap_set_multicast_list_queue);
EXPORT_SYMBOL(hostap_set_hostapd);
EXPORT_SYMBOL(hostap_set_hostapd_sta);
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index bca89cff85a6..39f82f219749 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -33,7 +33,44 @@
#include "ipw2200.h"
#include <linux/version.h>
-#define IPW2200_VERSION "git-1.1.1"
+
+#ifndef KBUILD_EXTMOD
+#define VK "k"
+#else
+#define VK
+#endif
+
+#ifdef CONFIG_IPW2200_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+#ifdef CONFIG_IPW2200_MONITOR
+#define VM "m"
+#else
+#define VM
+#endif
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+#define VP "p"
+#else
+#define VP
+#endif
+
+#ifdef CONFIG_IPW2200_RADIOTAP
+#define VR "r"
+#else
+#define VR
+#endif
+
+#ifdef CONFIG_IPW2200_QOS
+#define VQ "q"
+#else
+#define VQ
+#endif
+
+#define IPW2200_VERSION "1.1.2" VK VD VM VP VR VQ
#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
#define DRV_VERSION IPW2200_VERSION
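The VK/VD/VM/VP/VR/VQ fragments above are empty or one-letter string literals, so adjacent-literal concatenation yields a version string that encodes the build configuration. An illustrative stand-alone translation, assuming an in-tree build (KBUILD_EXTMOD unset) with CONFIG_IPW2200_DEBUG and CONFIG_IPW2200_MONITOR enabled and the remaining options off:

#include <stdio.h>

#define VK "k"	/* in-tree build */
#define VD "d"	/* CONFIG_IPW2200_DEBUG */
#define VM "m"	/* CONFIG_IPW2200_MONITOR */
#define VP	/* promiscuous off: empty expansion */
#define VR	/* radiotap off */
#define VQ	/* QoS off */
#define IPW2200_VERSION "1.1.2" VK VD VM VP VR VQ

int main(void)
{
	puts(IPW2200_VERSION);	/* prints "1.1.2kdm" */
	return 0;
}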
@@ -46,7 +83,9 @@ MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
static int cmdlog = 0;
+#ifdef CONFIG_IPW2200_DEBUG
static int debug = 0;
+#endif
static int channel = 0;
static int mode = 0;
@@ -61,8 +100,14 @@ static int roaming = 1;
static const char ipw_modes[] = {
'a', 'b', 'g', '?'
};
+static int antenna = CFG_SYS_ANTENNA_BOTH;
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
+#endif
+
+
+#ifdef CONFIG_IPW2200_QOS
static int qos_enable = 0;
static int qos_burst_enable = 0;
static int qos_no_ack_mask = 0;
@@ -126,7 +171,7 @@ static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_q
*qos_param);
static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
*qos_param);
-#endif /* CONFIG_IPW_QOS */
+#endif /* CONFIG_IPW2200_QOS */
static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
static void ipw_remove_current_network(struct ipw_priv *priv);
@@ -1269,6 +1314,105 @@ static ssize_t show_cmd_log(struct device *d,
static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+static void ipw_prom_free(struct ipw_priv *priv);
+static int ipw_prom_alloc(struct ipw_priv *priv);
+static ssize_t store_rtap_iface(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ipw_priv *priv = dev_get_drvdata(d);
+ int rc = 0;
+
+ if (count < 1)
+ return -EINVAL;
+
+ switch (buf[0]) {
+ case '0':
+ if (!rtap_iface)
+ return count;
+
+ if (netif_running(priv->prom_net_dev)) {
+ IPW_WARNING("Interface is up. Cannot unregister.\n");
+ return count;
+ }
+
+ ipw_prom_free(priv);
+ rtap_iface = 0;
+ break;
+
+ case '1':
+ if (rtap_iface)
+ return count;
+
+ rc = ipw_prom_alloc(priv);
+ if (!rc)
+ rtap_iface = 1;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (rc) {
+ IPW_ERROR("Failed to register promiscuous network "
+ "device (error %d).\n", rc);
+ }
+
+ return count;
+}
+
+static ssize_t show_rtap_iface(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipw_priv *priv = dev_get_drvdata(d);
+ if (rtap_iface)
+ return sprintf(buf, "%s", priv->prom_net_dev->name);
+ else {
+ buf[0] = '-';
+ buf[1] = '1';
+ buf[2] = '\0';
+ return 3;
+ }
+}
+
+static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
+ store_rtap_iface);
+
+static ssize_t store_rtap_filter(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ipw_priv *priv = dev_get_drvdata(d);
+
+ if (!priv->prom_priv) {
+ IPW_ERROR("Attempting to set filter without "
+ "rtap_iface enabled.\n");
+ return -EPERM;
+ }
+
+ priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
+
+ IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
+ BIT_ARG16(priv->prom_priv->filter));
+
+ return count;
+}
+
+static ssize_t show_rtap_filter(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipw_priv *priv = dev_get_drvdata(d);
+ return sprintf(buf, "0x%04X",
+ priv->prom_priv ? priv->prom_priv->filter : 0);
+}
+
+static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
+ store_rtap_filter);
+#endif
+
static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
char *buf)
{
@@ -2025,16 +2169,11 @@ static int ipw_send_host_complete(struct ipw_priv *priv)
return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
}
-static int ipw_send_system_config(struct ipw_priv *priv,
- struct ipw_sys_config *config)
+static int ipw_send_system_config(struct ipw_priv *priv)
{
- if (!priv || !config) {
- IPW_ERROR("Invalid args\n");
- return -1;
- }
-
- return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, sizeof(*config),
- config);
+ return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
+ sizeof(priv->sys_config),
+ &priv->sys_config);
}
static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
@@ -3104,10 +3243,10 @@ static int ipw_reset_nic(struct ipw_priv *priv)
struct ipw_fw {
- u32 ver;
- u32 boot_size;
- u32 ucode_size;
- u32 fw_size;
+ __le32 ver;
+ __le32 boot_size;
+ __le32 ucode_size;
+ __le32 fw_size;
u8 data[0];
};
@@ -3131,8 +3270,8 @@ static int ipw_get_fw(struct ipw_priv *priv,
fw = (void *)(*raw)->data;
- if ((*raw)->size < sizeof(*fw) +
- fw->boot_size + fw->ucode_size + fw->fw_size) {
+ if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
+ le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
IPW_ERROR("%s is too small or corrupt (%zd)\n",
name, (*raw)->size);
return -EINVAL;
@@ -3237,8 +3376,9 @@ static int ipw_load(struct ipw_priv *priv)
fw = (void *)raw->data;
boot_img = &fw->data[0];
- ucode_img = &fw->data[fw->boot_size];
- fw_img = &fw->data[fw->boot_size + fw->ucode_size];
+ ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
+ fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
+ le32_to_cpu(fw->ucode_size)];
if (rc < 0)
goto error;
@@ -3272,7 +3412,7 @@ static int ipw_load(struct ipw_priv *priv)
IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
/* DMA the initial boot firmware into the device */
- rc = ipw_load_firmware(priv, boot_img, fw->boot_size);
+ rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
if (rc < 0) {
IPW_ERROR("Unable to load boot firmware: %d\n", rc);
goto error;
@@ -3294,7 +3434,7 @@ static int ipw_load(struct ipw_priv *priv)
ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
/* DMA the ucode into the device */
- rc = ipw_load_ucode(priv, ucode_img, fw->ucode_size);
+ rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
if (rc < 0) {
IPW_ERROR("Unable to load ucode: %d\n", rc);
goto error;
@@ -3304,7 +3444,7 @@ static int ipw_load(struct ipw_priv *priv)
ipw_stop_nic(priv);
/* DMA bss firmware into the device */
- rc = ipw_load_firmware(priv, fw_img, fw->fw_size);
+ rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
if (rc < 0) {
IPW_ERROR("Unable to load firmware: %d\n", rc);
goto error;
@@ -3700,7 +3840,17 @@ static void ipw_bg_disassociate(void *data)
static void ipw_system_config(void *data)
{
struct ipw_priv *priv = data;
- ipw_send_system_config(priv, &priv->sys_config);
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+ if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
+ priv->sys_config.accept_all_data_frames = 1;
+ priv->sys_config.accept_non_directed_frames = 1;
+ priv->sys_config.accept_all_mgmt_bcpr = 1;
+ priv->sys_config.accept_all_mgmt_frames = 1;
+ }
+#endif
+
+ ipw_send_system_config(priv);
}
struct ipw_status_code {
@@ -3771,6 +3921,13 @@ static void inline average_init(struct average *avg)
memset(avg, 0, sizeof(*avg));
}
+#define DEPTH_RSSI 8
+#define DEPTH_NOISE 16
+static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
+{
+ return ((depth-1)*prev_avg + val)/depth;
+}
+
static void average_add(struct average *avg, s16 val)
{
avg->sum -= avg->entries[avg->pos];
@@ -3800,8 +3957,8 @@ static void ipw_reset_stats(struct ipw_priv *priv)
priv->quality = 0;
average_init(&priv->average_missed_beacons);
- average_init(&priv->average_rssi);
- average_init(&priv->average_noise);
+ priv->exp_avg_rssi = -60;
+ priv->exp_avg_noise = -85 + 0x100;
priv->last_rate = 0;
priv->last_missed_beacons = 0;
@@ -4008,7 +4165,7 @@ static void ipw_gather_stats(struct ipw_priv *priv)
IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
tx_quality, tx_failures_delta, tx_packets_delta);
- rssi = average_value(&priv->average_rssi);
+ rssi = priv->exp_avg_rssi;
signal_quality =
(100 *
(priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
@@ -4185,7 +4342,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
queue_work(priv->workqueue,
&priv->system_config);
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
if ((priv->status & STATUS_AUTH) &&
@@ -4482,6 +4639,24 @@ static void ipw_rx_notification(struct ipw_priv *priv,
&& priv->status & STATUS_ASSOCIATED)
queue_delayed_work(priv->workqueue,
&priv->request_scan, HZ);
+
+ /* Send an empty event to user space.
+ * We don't send the received data on the event because
+ * it would require us to do complex transcoding, and
+ * we want to minimise the work done in the irq handler.
+ * Use a request to extract the data.
+ * Also, we generate this event for any scan, regardless
+ * of how the scan was initiated. User space can just
+ * sync on periodic scan to get fresh data...
+ * Jean II */
+ if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) {
+ union iwreq_data wrqu;
+
+ wrqu.data.length = 0;
+ wrqu.data.flags = 0;
+ wireless_send_event(priv->net_dev, SIOCGIWSCAN,
+ &wrqu, NULL);
+ }
break;
}
@@ -4577,11 +4752,10 @@ static void ipw_rx_notification(struct ipw_priv *priv,
case HOST_NOTIFICATION_NOISE_STATS:{
if (notif->size == sizeof(u32)) {
- priv->last_noise =
- (u8) (le32_to_cpu(notif->u.noise.value) &
- 0xff);
- average_add(&priv->average_noise,
- priv->last_noise);
+ priv->exp_avg_noise =
+ exponential_average(priv->exp_avg_noise,
+ (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
+ DEPTH_NOISE);
break;
}
@@ -6170,8 +6344,6 @@ static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
{
/* make sure WPA is enabled */
ipw_wpa_enable(priv, 1);
-
- ipw_disassociate(priv);
}
static int ipw_set_rsn_capa(struct ipw_priv *priv,
@@ -6365,6 +6537,7 @@ static int ipw_wx_set_auth(struct net_device *dev,
case IW_AUTH_WPA_ENABLED:
ret = ipw_wpa_enable(priv, param->value);
+ ipw_disassociate(priv);
break;
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
@@ -6506,7 +6679,7 @@ static int ipw_wx_set_mlme(struct net_device *dev,
return 0;
}
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
/* QoS */
/*
@@ -6853,61 +7026,55 @@ static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
return from_priority_to_tx_queue[priority] - 1;
}
-/*
-* add QoS parameter to the TX command
-*/
-static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
- u16 priority,
- struct tfd_data *tfd, u8 unicast)
+static int ipw_is_qos_active(struct net_device *dev,
+ struct sk_buff *skb)
{
- int ret = 0;
- int tx_queue_id = 0;
+ struct ipw_priv *priv = ieee80211_priv(dev);
struct ieee80211_qos_data *qos_data = NULL;
int active, supported;
- unsigned long flags;
+ u8 *daddr = skb->data + ETH_ALEN;
+ int unicast = !is_multicast_ether_addr(daddr);
if (!(priv->status & STATUS_ASSOCIATED))
return 0;
qos_data = &priv->assoc_network->qos_data;
- spin_lock_irqsave(&priv->ieee->lock, flags);
-
if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
if (unicast == 0)
qos_data->active = 0;
else
qos_data->active = qos_data->supported;
}
-
active = qos_data->active;
supported = qos_data->supported;
-
- spin_unlock_irqrestore(&priv->ieee->lock, flags);
-
IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
"unicast %d\n",
priv->qos_data.qos_enable, active, supported, unicast);
- if (active && priv->qos_data.qos_enable) {
- ret = from_priority_to_tx_queue[priority];
- tx_queue_id = ret - 1;
- IPW_DEBUG_QOS("QoS packet priority is %d \n", priority);
- if (priority <= 7) {
- tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
- tfd->tfd.tfd_26.mchdr.qos_ctrl = priority;
- tfd->tfd.tfd_26.mchdr.frame_ctl |=
- IEEE80211_STYPE_QOS_DATA;
-
- if (priv->qos_data.qos_no_ack_mask &
- (1UL << tx_queue_id)) {
- tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
- tfd->tfd.tfd_26.mchdr.qos_ctrl |=
- CTRL_QOS_NO_ACK;
- }
- }
- }
+ if (active && priv->qos_data.qos_enable)
+ return 1;
- return ret;
+ return 0;
+
+}
+/*
+* add QoS parameter to the TX command
+*/
+static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
+ u16 priority,
+ struct tfd_data *tfd)
+{
+ int tx_queue_id = 0;
+
+
+ tx_queue_id = from_priority_to_tx_queue[priority] - 1;
+ tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
+
+ if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
+ tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
+ tfd->tfd.tfd_26.mchdr.qos_ctrl |= CTRL_QOS_NO_ACK;
+ }
+ return 0;
}
/*
@@ -6977,7 +7144,7 @@ static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos
qos_param);
}
-#endif /* CONFIG_IPW_QOS */
+#endif /* CONFIG_IPW2200_QOS */
static int ipw_associate_network(struct ipw_priv *priv,
struct ieee80211_network *network,
@@ -7116,7 +7283,7 @@ static int ipw_associate_network(struct ipw_priv *priv,
else
priv->sys_config.answer_broadcast_ssid_probe = 0;
- err = ipw_send_system_config(priv, &priv->sys_config);
+ err = ipw_send_system_config(priv);
if (err) {
IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
return err;
@@ -7141,7 +7308,7 @@ static int ipw_associate_network(struct ipw_priv *priv,
priv->assoc_network = network;
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
ipw_qos_association(priv, network);
#endif
@@ -7415,7 +7582,7 @@ static void ipw_handle_data_packet(struct ipw_priv *priv,
}
}
-#ifdef CONFIG_IEEE80211_RADIOTAP
+#ifdef CONFIG_IPW2200_RADIOTAP
static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
struct ipw_rx_mem_buffer *rxb,
struct ieee80211_rx_stats *stats)
@@ -7432,15 +7599,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
/* Magic struct that slots into the radiotap header -- no reason
* to build this manually element by element, we can write it much
* more efficiently than we can parse it. ORDER MATTERS HERE */
- struct ipw_rt_hdr {
- struct ieee80211_radiotap_header rt_hdr;
- u8 rt_flags; /* radiotap packet flags */
- u8 rt_rate; /* rate in 500kb/s */
- u16 rt_channel; /* channel in mhz */
- u16 rt_chbitmask; /* channel bitfield */
- s8 rt_dbmsignal; /* signal in dbM, kluged to signed */
- u8 rt_antenna; /* antenna number */
- } *ipw_rt;
+ struct ipw_rt_hdr *ipw_rt;
short len = le16_to_cpu(pkt->u.frame.length);
@@ -7494,9 +7653,11 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
/* Big bitfield of all the fields we provide in radiotap */
ipw_rt->rt_hdr.it_present =
((1 << IEEE80211_RADIOTAP_FLAGS) |
+ (1 << IEEE80211_RADIOTAP_TSFT) |
(1 << IEEE80211_RADIOTAP_RATE) |
(1 << IEEE80211_RADIOTAP_CHANNEL) |
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
+ (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
(1 << IEEE80211_RADIOTAP_ANTENNA));
/* Zero the flags, we'll add to them as we go */
@@ -7582,6 +7743,217 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
}
#endif
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+#define ieee80211_is_probe_response(fc) \
+ ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
+ (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
+
+#define ieee80211_is_management(fc) \
+ ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
+
+#define ieee80211_is_control(fc) \
+ ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
+
+#define ieee80211_is_data(fc) \
+ ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
+
+#define ieee80211_is_assoc_request(fc) \
+ ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
+
+#define ieee80211_is_reassoc_request(fc) \
+ ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
+
+static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
+ struct ipw_rx_mem_buffer *rxb,
+ struct ieee80211_rx_stats *stats)
+{
+ struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
+ struct ipw_rx_frame *frame = &pkt->u.frame;
+ struct ipw_rt_hdr *ipw_rt;
+
+ /* First cache any information we need before we overwrite
+ * the information provided in the skb from the hardware */
+ struct ieee80211_hdr *hdr;
+ u16 channel = frame->received_channel;
+ u8 phy_flags = frame->antennaAndPhy;
+ s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
+ s8 noise = frame->noise;
+ u8 rate = frame->rate;
+ short len = le16_to_cpu(pkt->u.frame.length);
+ u64 tsf = 0;
+ struct sk_buff *skb;
+ int hdr_only = 0;
+ u16 filter = priv->prom_priv->filter;
+
+ /* If the filter is set to not include Rx frames then return */
+ if (filter & IPW_PROM_NO_RX)
+ return;
+
+ /* We received data from the HW, so stop the watchdog */
+ priv->prom_net_dev->trans_start = jiffies;
+
+ if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
+ priv->prom_priv->ieee->stats.rx_errors++;
+ IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
+ return;
+ }
+
+ /* We only process data packets if the interface is open */
+ if (unlikely(!netif_running(priv->prom_net_dev))) {
+ priv->prom_priv->ieee->stats.rx_dropped++;
+ IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
+ return;
+ }
+
+ /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
+ * that now */
+ if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
+ /* FIXME: Should alloc bigger skb instead */
+ priv->prom_priv->ieee->stats.rx_dropped++;
+ IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
+ return;
+ }
+
+ hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
+ if (ieee80211_is_management(hdr->frame_ctl)) {
+ if (filter & IPW_PROM_NO_MGMT)
+ return;
+ if (filter & IPW_PROM_MGMT_HEADER_ONLY)
+ hdr_only = 1;
+ } else if (ieee80211_is_control(hdr->frame_ctl)) {
+ if (filter & IPW_PROM_NO_CTL)
+ return;
+ if (filter & IPW_PROM_CTL_HEADER_ONLY)
+ hdr_only = 1;
+ } else if (ieee80211_is_data(hdr->frame_ctl)) {
+ if (filter & IPW_PROM_NO_DATA)
+ return;
+ if (filter & IPW_PROM_DATA_HEADER_ONLY)
+ hdr_only = 1;
+ }
+
+ /* Copy the SKB since this is for the promiscuous side */
+ skb = skb_copy(rxb->skb, GFP_ATOMIC);
+ if (skb == NULL) {
+ IPW_ERROR("skb_clone failed for promiscuous copy.\n");
+ return;
+ }
+
+ /* copy the frame data to write after where the radiotap header goes */
+ ipw_rt = (void *)skb->data;
+
+ if (hdr_only)
+ len = ieee80211_get_hdrlen(hdr->frame_ctl);
+
+ memcpy(ipw_rt->payload, hdr, len);
+
+ /* Zero the radiotap static buffer ... We only need to zero the bytes
+ * NOT part of our real header, saves a little time.
+ *
+ * No longer necessary since we fill in all our data. Purge before
+ * merging patch officially.
+ * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
+ * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
+ */
+
+ ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
+ ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
+ ipw_rt->rt_hdr.it_len = sizeof(*ipw_rt); /* total header+data */
+
+ /* Set the size of the skb to the size of the frame */
+ skb_put(skb, ipw_rt->rt_hdr.it_len + len);
+
+ /* Big bitfield of all the fields we provide in radiotap */
+ ipw_rt->rt_hdr.it_present =
+ ((1 << IEEE80211_RADIOTAP_FLAGS) |
+ (1 << IEEE80211_RADIOTAP_TSFT) |
+ (1 << IEEE80211_RADIOTAP_RATE) |
+ (1 << IEEE80211_RADIOTAP_CHANNEL) |
+ (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
+ (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
+ (1 << IEEE80211_RADIOTAP_ANTENNA));
+
+ /* Zero the flags, we'll add to them as we go */
+ ipw_rt->rt_flags = 0;
+
+ ipw_rt->rt_tsf = tsf;
+
+ /* Convert to DBM */
+ ipw_rt->rt_dbmsignal = signal;
+ ipw_rt->rt_dbmnoise = noise;
+
+ /* Convert the channel data and set the flags */
+ ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
+ if (channel > 14) { /* 802.11a */
+ ipw_rt->rt_chbitmask =
+ cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
+ } else if (phy_flags & (1 << 5)) { /* 802.11b */
+ ipw_rt->rt_chbitmask =
+ cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
+ } else { /* 802.11g */
+ ipw_rt->rt_chbitmask =
+ (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
+ }
+
+ /* set the rate in multiples of 500k/s */
+ switch (rate) {
+ case IPW_TX_RATE_1MB:
+ ipw_rt->rt_rate = 2;
+ break;
+ case IPW_TX_RATE_2MB:
+ ipw_rt->rt_rate = 4;
+ break;
+ case IPW_TX_RATE_5MB:
+ ipw_rt->rt_rate = 10;
+ break;
+ case IPW_TX_RATE_6MB:
+ ipw_rt->rt_rate = 12;
+ break;
+ case IPW_TX_RATE_9MB:
+ ipw_rt->rt_rate = 18;
+ break;
+ case IPW_TX_RATE_11MB:
+ ipw_rt->rt_rate = 22;
+ break;
+ case IPW_TX_RATE_12MB:
+ ipw_rt->rt_rate = 24;
+ break;
+ case IPW_TX_RATE_18MB:
+ ipw_rt->rt_rate = 36;
+ break;
+ case IPW_TX_RATE_24MB:
+ ipw_rt->rt_rate = 48;
+ break;
+ case IPW_TX_RATE_36MB:
+ ipw_rt->rt_rate = 72;
+ break;
+ case IPW_TX_RATE_48MB:
+ ipw_rt->rt_rate = 96;
+ break;
+ case IPW_TX_RATE_54MB:
+ ipw_rt->rt_rate = 108;
+ break;
+ default:
+ ipw_rt->rt_rate = 0;
+ break;
+ }
+
+ /* antenna number */
+ ipw_rt->rt_antenna = (phy_flags & 3);
+
+ /* set the preamble flag if we have it */
+ if (phy_flags & (1 << 6))
+ ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
+
+ IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
+
+ if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
+ priv->prom_priv->ieee->stats.rx_errors++;
+ dev_kfree_skb_any(skb);
+ }
+}
+#endif
+
static int is_network_packet(struct ipw_priv *priv,
struct ieee80211_hdr_4addr *header)
{
@@ -7808,15 +8180,21 @@ static void ipw_rx(struct ipw_priv *priv)
priv->rx_packets++;
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+ if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
+ ipw_handle_promiscuous_rx(priv, rxb, &stats);
+#endif
+
#ifdef CONFIG_IPW2200_MONITOR
if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
-#ifdef CONFIG_IEEE80211_RADIOTAP
- ipw_handle_data_packet_monitor(priv,
- rxb,
- &stats);
+#ifdef CONFIG_IPW2200_RADIOTAP
+
+ ipw_handle_data_packet_monitor(priv,
+ rxb,
+ &stats);
#else
- ipw_handle_data_packet(priv, rxb,
- &stats);
+ ipw_handle_data_packet(priv, rxb,
+ &stats);
#endif
break;
}
@@ -7837,9 +8215,9 @@ static void ipw_rx(struct ipw_priv *priv)
if (network_packet && priv->assoc_network) {
priv->assoc_network->stats.rssi =
stats.rssi;
- average_add(&priv->average_rssi,
- stats.rssi);
- priv->last_rx_rssi = stats.rssi;
+ priv->exp_avg_rssi =
+ exponential_average(priv->exp_avg_rssi,
+ stats.rssi, DEPTH_RSSI);
}
IPW_DEBUG_RX("Frame: len=%u\n",
@@ -7982,10 +8360,10 @@ static int ipw_sw_reset(struct ipw_priv *priv, int option)
IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
/* TODO: Validate that provided channel is in range */
}
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
ipw_qos_init(priv, qos_enable, qos_burst_enable,
burst_duration_CCK, burst_duration_OFDM);
-#endif /* CONFIG_IPW_QOS */
+#endif /* CONFIG_IPW2200_QOS */
switch (mode) {
case 1:
@@ -7996,7 +8374,7 @@ static int ipw_sw_reset(struct ipw_priv *priv, int option)
#ifdef CONFIG_IPW2200_MONITOR
case 2:
priv->ieee->iw_mode = IW_MODE_MONITOR;
-#ifdef CONFIG_IEEE80211_RADIOTAP
+#ifdef CONFIG_IPW2200_RADIOTAP
priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
priv->net_dev->type = ARPHRD_IEEE80211;
@@ -8251,7 +8629,7 @@ static int ipw_wx_set_mode(struct net_device *dev,
priv->net_dev->type = ARPHRD_ETHER;
if (wrqu->mode == IW_MODE_MONITOR)
-#ifdef CONFIG_IEEE80211_RADIOTAP
+#ifdef CONFIG_IPW2200_RADIOTAP
priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
priv->net_dev->type = ARPHRD_IEEE80211;
@@ -8379,7 +8757,8 @@ static int ipw_wx_get_range(struct net_device *dev,
/* Event capability (kernel + driver) */
range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
- IW_EVENT_CAPA_MASK(SIOCGIWAP));
+ IW_EVENT_CAPA_MASK(SIOCGIWAP) |
+ IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
range->event_capa[1] = IW_EVENT_CAPA_K_1;
range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
@@ -8734,6 +9113,7 @@ static int ipw_wx_get_rate(struct net_device *dev,
struct ipw_priv *priv = ieee80211_priv(dev);
mutex_lock(&priv->mutex);
wrqu->bitrate.value = priv->last_rate;
+ wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
mutex_unlock(&priv->mutex);
IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
return 0;
@@ -9351,7 +9731,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
if (enable) {
if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
-#ifdef CONFIG_IEEE80211_RADIOTAP
+#ifdef CONFIG_IPW2200_RADIOTAP
priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
priv->net_dev->type = ARPHRD_IEEE80211;
@@ -9579,8 +9959,8 @@ static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
}
wstats->qual.qual = priv->quality;
- wstats->qual.level = average_value(&priv->average_rssi);
- wstats->qual.noise = average_value(&priv->average_noise);
+ wstats->qual.level = priv->exp_avg_rssi;
+ wstats->qual.noise = priv->exp_avg_noise;
wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
@@ -9608,7 +9988,9 @@ static void init_sys_config(struct ipw_sys_config *sys_config)
sys_config->disable_unicast_decryption = 1;
sys_config->exclude_multicast_unencrypted = 0;
sys_config->disable_multicast_decryption = 1;
- sys_config->antenna_diversity = CFG_SYS_ANTENNA_SLOW_DIV;
+ if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
+ antenna = CFG_SYS_ANTENNA_BOTH;
+ sys_config->antenna_diversity = antenna;
sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
sys_config->dot11g_auto_detection = 0;
sys_config->enable_cts_to_self = 0;
@@ -9647,11 +10029,11 @@ we need to heavily modify the ieee80211_skb_to_txb.
static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
int pri)
{
- struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
+ struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
txb->fragments[0]->data;
int i = 0;
struct tfd_frame *tfd;
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
int tx_id = ipw_get_tx_queue_number(priv, pri);
struct clx2_tx_queue *txq = &priv->txq[tx_id];
#else
@@ -9662,9 +10044,9 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
u16 remaining_bytes;
int fc;
+ hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
switch (priv->ieee->iw_mode) {
case IW_MODE_ADHOC:
- hdr_len = IEEE80211_3ADDR_LEN;
unicast = !is_multicast_ether_addr(hdr->addr1);
id = ipw_find_station(priv, hdr->addr1);
if (id == IPW_INVALID_STATION) {
@@ -9681,7 +10063,6 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
case IW_MODE_INFRA:
default:
unicast = !is_multicast_ether_addr(hdr->addr3);
- hdr_len = IEEE80211_3ADDR_LEN;
id = 0;
break;
}
@@ -9759,9 +10140,10 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
/* No hardware encryption */
tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
-#ifdef CONFIG_IPW_QOS
- ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data), unicast);
-#endif /* CONFIG_IPW_QOS */
+#ifdef CONFIG_IPW2200_QOS
+ if (fc & IEEE80211_STYPE_QOS_DATA)
+ ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
+#endif /* CONFIG_IPW2200_QOS */
/* payload */
tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
@@ -9841,12 +10223,12 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
static int ipw_net_is_queue_full(struct net_device *dev, int pri)
{
struct ipw_priv *priv = ieee80211_priv(dev);
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
int tx_id = ipw_get_tx_queue_number(priv, pri);
struct clx2_tx_queue *txq = &priv->txq[tx_id];
#else
struct clx2_tx_queue *txq = &priv->txq[0];
-#endif /* CONFIG_IPW_QOS */
+#endif /* CONFIG_IPW2200_QOS */
if (ipw_queue_space(&txq->q) < txq->q.high_mark)
return 1;
@@ -9854,6 +10236,88 @@ static int ipw_net_is_queue_full(struct net_device *dev, int pri)
return 0;
}
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
+ struct ieee80211_txb *txb)
+{
+ struct ieee80211_rx_stats dummystats;
+ struct ieee80211_hdr *hdr;
+ u8 n;
+ u16 filter = priv->prom_priv->filter;
+ int hdr_only = 0;
+
+ if (filter & IPW_PROM_NO_TX)
+ return;
+
+ memset(&dummystats, 0, sizeof(dummystats));
+
+ /* Filtering of fragment chains is done against the first fragment */
+ hdr = (void *)txb->fragments[0]->data;
+ if (ieee80211_is_management(hdr->frame_ctl)) {
+ if (filter & IPW_PROM_NO_MGMT)
+ return;
+ if (filter & IPW_PROM_MGMT_HEADER_ONLY)
+ hdr_only = 1;
+ } else if (ieee80211_is_control(hdr->frame_ctl)) {
+ if (filter & IPW_PROM_NO_CTL)
+ return;
+ if (filter & IPW_PROM_CTL_HEADER_ONLY)
+ hdr_only = 1;
+ } else if (ieee80211_is_data(hdr->frame_ctl)) {
+ if (filter & IPW_PROM_NO_DATA)
+ return;
+ if (filter & IPW_PROM_DATA_HEADER_ONLY)
+ hdr_only = 1;
+ }
+
+ for(n=0; n<txb->nr_frags; ++n) {
+ struct sk_buff *src = txb->fragments[n];
+ struct sk_buff *dst;
+ struct ieee80211_radiotap_header *rt_hdr;
+ int len;
+
+ if (hdr_only) {
+ hdr = (void *)src->data;
+ len = ieee80211_get_hdrlen(hdr->frame_ctl);
+ } else
+ len = src->len;
+
+ dst = alloc_skb(
+ len + IEEE80211_RADIOTAP_HDRLEN, GFP_ATOMIC);
+ if (!dst) continue;
+
+ rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
+
+ rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
+ rt_hdr->it_pad = 0;
+ rt_hdr->it_present = 0; /* after all, it's just an idea */
+ rt_hdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
+
+ *(u16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
+ ieee80211chan2mhz(priv->channel));
+ if (priv->channel > 14) /* 802.11a */
+ *(u16*)skb_put(dst, sizeof(u16)) =
+ cpu_to_le16(IEEE80211_CHAN_OFDM |
+ IEEE80211_CHAN_5GHZ);
+ else if (priv->ieee->mode == IEEE_B) /* 802.11b */
+ *(u16*)skb_put(dst, sizeof(u16)) =
+ cpu_to_le16(IEEE80211_CHAN_CCK |
+ IEEE80211_CHAN_2GHZ);
+ else /* 802.11g */
+ *(u16*)skb_put(dst, sizeof(u16)) =
+ cpu_to_le16(IEEE80211_CHAN_OFDM |
+ IEEE80211_CHAN_2GHZ);
+
+ rt_hdr->it_len = dst->len;
+
+ memcpy(skb_put(dst, len), src->data, len);
+
+ if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
+ dev_kfree_skb_any(dst);
+ }
+}
+#endif
+
static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
struct net_device *dev, int pri)
{
@@ -9871,6 +10335,11 @@ static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
goto fail_unlock;
}
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+ if (rtap_iface && netif_running(priv->prom_net_dev))
+ ipw_handle_promiscuous_tx(priv, txb);
+#endif
+
ret = ipw_tx_skb(priv, txb, pri);
if (ret == NETDEV_TX_OK)
__ipw_led_activity_on(priv);
@@ -10169,10 +10638,10 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
INIT_WORK(&priv->merge_networks,
(void (*)(void *))ipw_merge_adhoc_network, priv);
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
priv);
-#endif /* CONFIG_IPW_QOS */
+#endif /* CONFIG_IPW2200_QOS */
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
ipw_irq_tasklet, (unsigned long)priv);
@@ -10318,12 +10787,21 @@ static int ipw_config(struct ipw_priv *priv)
|= CFG_BT_COEXISTENCE_OOB;
}
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+ if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
+ priv->sys_config.accept_all_data_frames = 1;
+ priv->sys_config.accept_non_directed_frames = 1;
+ priv->sys_config.accept_all_mgmt_bcpr = 1;
+ priv->sys_config.accept_all_mgmt_frames = 1;
+ }
+#endif
+
if (priv->ieee->iw_mode == IW_MODE_ADHOC)
priv->sys_config.answer_broadcast_ssid_probe = 1;
else
priv->sys_config.answer_broadcast_ssid_probe = 0;
- if (ipw_send_system_config(priv, &priv->sys_config))
+ if (ipw_send_system_config(priv))
goto error;
init_supported_rates(priv, &priv->rates);
@@ -10335,10 +10813,10 @@ static int ipw_config(struct ipw_priv *priv)
if (ipw_send_rts_threshold(priv, priv->rts_threshold))
goto error;
}
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
ipw_qos_activate(priv, NULL);
-#endif /* CONFIG_IPW_QOS */
+#endif /* CONFIG_IPW2200_QOS */
if (ipw_set_random_seed(priv))
goto error;
@@ -10639,6 +11117,7 @@ static int ipw_up(struct ipw_priv *priv)
if (priv->cmdlog == NULL) {
IPW_ERROR("Error allocating %d command log entries.\n",
cmdlog);
+ return -ENOMEM;
} else {
memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog);
priv->cmdlog_len = cmdlog;
@@ -10860,6 +11339,10 @@ static struct attribute *ipw_sysfs_entries[] = {
&dev_attr_led.attr,
&dev_attr_speed_scan.attr,
&dev_attr_net_stats.attr,
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+ &dev_attr_rtap_iface.attr,
+ &dev_attr_rtap_filter.attr,
+#endif
NULL
};
@@ -10868,6 +11351,109 @@ static struct attribute_group ipw_attribute_group = {
.attrs = ipw_sysfs_entries,
};
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+static int ipw_prom_open(struct net_device *dev)
+{
+ struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
+ struct ipw_priv *priv = prom_priv->priv;
+
+ IPW_DEBUG_INFO("prom dev->open\n");
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+
+ if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
+ priv->sys_config.accept_all_data_frames = 1;
+ priv->sys_config.accept_non_directed_frames = 1;
+ priv->sys_config.accept_all_mgmt_bcpr = 1;
+ priv->sys_config.accept_all_mgmt_frames = 1;
+
+ ipw_send_system_config(priv);
+ }
+
+ return 0;
+}
+
+static int ipw_prom_stop(struct net_device *dev)
+{
+ struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
+ struct ipw_priv *priv = prom_priv->priv;
+
+ IPW_DEBUG_INFO("prom dev->stop\n");
+
+ if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
+ priv->sys_config.accept_all_data_frames = 0;
+ priv->sys_config.accept_non_directed_frames = 0;
+ priv->sys_config.accept_all_mgmt_bcpr = 0;
+ priv->sys_config.accept_all_mgmt_frames = 0;
+
+ ipw_send_system_config(priv);
+ }
+
+ return 0;
+}
+
+static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ IPW_DEBUG_INFO("prom dev->xmit\n");
+ netif_stop_queue(dev);
+ return -EOPNOTSUPP;
+}
+
+static struct net_device_stats *ipw_prom_get_stats(struct net_device *dev)
+{
+ struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
+ return &prom_priv->ieee->stats;
+}
+
+static int ipw_prom_alloc(struct ipw_priv *priv)
+{
+ int rc = 0;
+
+ if (priv->prom_net_dev)
+ return -EPERM;
+
+ priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
+ if (priv->prom_net_dev == NULL)
+ return -ENOMEM;
+
+ priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
+ priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
+ priv->prom_priv->priv = priv;
+
+ strcpy(priv->prom_net_dev->name, "rtap%d");
+
+ priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
+ priv->prom_net_dev->open = ipw_prom_open;
+ priv->prom_net_dev->stop = ipw_prom_stop;
+ priv->prom_net_dev->get_stats = ipw_prom_get_stats;
+ priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
+
+ priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
+
+ rc = register_netdev(priv->prom_net_dev);
+ if (rc) {
+ free_ieee80211(priv->prom_net_dev);
+ priv->prom_net_dev = NULL;
+ return rc;
+ }
+
+ return 0;
+}
+
+static void ipw_prom_free(struct ipw_priv *priv)
+{
+ if (!priv->prom_net_dev)
+ return;
+
+ unregister_netdev(priv->prom_net_dev);
+ free_ieee80211(priv->prom_net_dev);
+
+ priv->prom_net_dev = NULL;
+}
+
+#endif
+
+
static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = 0;
@@ -10959,11 +11545,12 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->ieee->set_security = shim__set_security;
priv->ieee->is_queue_full = ipw_net_is_queue_full;
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
+ priv->ieee->is_qos_active = ipw_is_qos_active;
priv->ieee->handle_probe_response = ipw_handle_beacon;
priv->ieee->handle_beacon = ipw_handle_probe_response;
priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
-#endif /* CONFIG_IPW_QOS */
+#endif /* CONFIG_IPW2200_QOS */
priv->ieee->perfect_rssi = -20;
priv->ieee->worst_rssi = -85;
@@ -10997,6 +11584,18 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_remove_sysfs;
}
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+ if (rtap_iface) {
+ err = ipw_prom_alloc(priv);
+ if (err) {
+ IPW_ERROR("Failed to register promiscuous network "
+ "device (error %d).\n", err);
+ unregister_netdev(priv->net_dev);
+ goto out_remove_sysfs;
+ }
+ }
+#endif
+
printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
"channels, %d 802.11a channels)\n",
priv->ieee->geo.name, priv->ieee->geo.bg_channels,
@@ -11076,6 +11675,10 @@ static void ipw_pci_remove(struct pci_dev *pdev)
priv->error = NULL;
}
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+ ipw_prom_free(priv);
+#endif
+
free_irq(pdev->irq, priv);
iounmap(priv->hw_base);
pci_release_regions(pdev);
@@ -11200,7 +11803,12 @@ MODULE_PARM_DESC(debug, "debug output mask");
module_param(channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+module_param(rtap_iface, int, 0444);
+MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
+#endif
+
+#ifdef CONFIG_IPW2200_QOS
module_param(qos_enable, int, 0444);
MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
@@ -11215,7 +11823,7 @@ MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
module_param(burst_duration_OFDM, int, 0444);
MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
-#endif /* CONFIG_IPW_QOS */
+#endif /* CONFIG_IPW2200_QOS */
#ifdef CONFIG_IPW2200_MONITOR
module_param(mode, int, 0444);
@@ -11238,5 +11846,8 @@ MODULE_PARM_DESC(cmdlog,
module_param(roaming, int, 0444);
MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
+module_param(antenna, int, 0444);
+MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
+
module_exit(ipw_exit);
module_init(ipw_init);
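The windowed struct average used for RSSI and noise is replaced throughout this file by the integer exponential moving average added near ipw_reset_stats(): exponential_average(prev, val, depth) = ((depth - 1) * prev + val) / depth, with DEPTH_RSSI 8 and DEPTH_NOISE 16. A stand-alone, purely illustrative user-space program showing how the RSSI average, seeded at -60 as in ipw_reset_stats(), tracks a run of -40 dBm samples:

#include <stdio.h>

/* Same integer formula as the driver; short/unsigned char mirror s16/u8. */
static short exponential_average(short prev_avg, short val, unsigned char depth)
{
	return ((depth - 1) * prev_avg + val) / depth;
}

int main(void)
{
	short rssi = -60;	/* seed used by ipw_reset_stats() */
	for (unsigned i = 0; i < 4; i++) {
		rssi = exponential_average(rssi, -40, 8 /* DEPTH_RSSI */);
		printf("avg after sample %u: %d dBm\n", i + 1, rssi);
	}
	return 0;	/* prints -57, -54, -52, -50 */
}

Each step closes roughly 1/depth of the remaining gap, so the larger DEPTH_NOISE gives a smoother but slower-moving noise estimate than the RSSI one.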
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index 4b9804900702..6044c0be2c80 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -789,7 +789,7 @@ struct ipw_sys_config {
u8 bt_coexist_collision_thr;
u8 silence_threshold;
u8 accept_all_mgmt_bcpr;
- u8 accept_all_mgtm_frames;
+ u8 accept_all_mgmt_frames;
u8 pass_noise_stats_to_host;
u8 reserved3;
} __attribute__ ((packed));
@@ -1122,6 +1122,52 @@ struct ipw_fw_error {
u8 payload[0];
} __attribute__ ((packed));
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+
+enum ipw_prom_filter {
+ IPW_PROM_CTL_HEADER_ONLY = (1 << 0),
+ IPW_PROM_MGMT_HEADER_ONLY = (1 << 1),
+ IPW_PROM_DATA_HEADER_ONLY = (1 << 2),
+ IPW_PROM_ALL_HEADER_ONLY = 0xf, /* bits 0..3 */
+ IPW_PROM_NO_TX = (1 << 4),
+ IPW_PROM_NO_RX = (1 << 5),
+ IPW_PROM_NO_CTL = (1 << 6),
+ IPW_PROM_NO_MGMT = (1 << 7),
+ IPW_PROM_NO_DATA = (1 << 8),
+};
+
+struct ipw_priv;
+struct ipw_prom_priv {
+ struct ipw_priv *priv;
+ struct ieee80211_device *ieee;
+ enum ipw_prom_filter filter;
+ int tx_packets;
+ int rx_packets;
+};
+#endif
+
+#if defined(CONFIG_IPW2200_RADIOTAP) || defined(CONFIG_IPW2200_PROMISCUOUS)
+/* Magic struct that slots into the radiotap header -- no reason
+ * to build this manually element by element, we can write it much
+ * more efficiently than we can parse it. ORDER MATTERS HERE
+ *
+ * When sent to us via the simulated Rx interface in sysfs, the entire
+ * structure is provided regardless of any bits unset.
+ */
+struct ipw_rt_hdr {
+ struct ieee80211_radiotap_header rt_hdr;
+ u64 rt_tsf; /* TSF */
+ u8 rt_flags; /* radiotap packet flags */
+ u8 rt_rate; /* rate in 500kb/s */
+ u16 rt_channel; /* channel in mhz */
+ u16 rt_chbitmask; /* channel bitfield */
+ s8 rt_dbmsignal; /* signal in dbM, kluged to signed */
+ s8 rt_dbmnoise;
+ u8 rt_antenna; /* antenna number */
+ u8 payload[0]; /* payload... */
+} __attribute__ ((packed));
+#endif
+
struct ipw_priv {
/* ieee device used by generic ieee processing code */
struct ieee80211_device *ieee;
@@ -1133,6 +1179,12 @@ struct ipw_priv {
struct pci_dev *pci_dev;
struct net_device *net_dev;
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+ /* Promiscuous mode */
+ struct ipw_prom_priv *prom_priv;
+ struct net_device *prom_net_dev;
+#endif
+
/* pci hardware address support */
void __iomem *hw_base;
unsigned long hw_len;
@@ -1153,11 +1205,9 @@ struct ipw_priv {
u32 config;
u32 capability;
- u8 last_rx_rssi;
- u8 last_noise;
struct average average_missed_beacons;
- struct average average_rssi;
- struct average average_noise;
+ s16 exp_avg_rssi;
+ s16 exp_avg_noise;
u32 port_type;
int rx_bufs_min; /**< minimum number of bufs in Rx queue */
int rx_pend_max; /**< maximum pending buffers for one IRQ */
@@ -1308,6 +1358,29 @@ struct ipw_priv {
/* debug macros */
+/* Debug and printf string expansion helpers for printing bitfields */
+#define BIT_FMT8 "%c%c%c%c-%c%c%c%c"
+#define BIT_FMT16 BIT_FMT8 ":" BIT_FMT8
+#define BIT_FMT32 BIT_FMT16 " " BIT_FMT16
+
+#define BITC(x,y) (((x>>y)&1)?'1':'0')
+#define BIT_ARG8(x) \
+BITC(x,7),BITC(x,6),BITC(x,5),BITC(x,4),\
+BITC(x,3),BITC(x,2),BITC(x,1),BITC(x,0)
+
+#define BIT_ARG16(x) \
+BITC(x,15),BITC(x,14),BITC(x,13),BITC(x,12),\
+BITC(x,11),BITC(x,10),BITC(x,9),BITC(x,8),\
+BIT_ARG8(x)
+
+#define BIT_ARG32(x) \
+BITC(x,31),BITC(x,30),BITC(x,29),BITC(x,28),\
+BITC(x,27),BITC(x,26),BITC(x,25),BITC(x,24),\
+BITC(x,23),BITC(x,22),BITC(x,21),BITC(x,20),\
+BITC(x,19),BITC(x,18),BITC(x,17),BITC(x,16),\
+BIT_ARG16(x)
+
+
#ifdef CONFIG_IPW2200_DEBUG
#define IPW_DEBUG(level, fmt, args...) \
do { if (ipw_debug_level & (level)) \
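store_rtap_filter() above parses a number into priv->prom_priv->filter, whose bits are defined by enum ipw_prom_filter in this header, and logs it with the BIT_FMT16/BIT_ARG16 helpers. An illustrative user-space sketch of the same bit math (the chosen filter value is just an example of what could be written to the rtap_filter attribute):

#include <stdio.h>

#define BITC(x,y) (((x>>y)&1)?'1':'0')
#define BIT_FMT8  "%c%c%c%c-%c%c%c%c"
#define BIT_FMT16 BIT_FMT8 ":" BIT_FMT8
#define BIT_ARG8(x)  BITC(x,7),BITC(x,6),BITC(x,5),BITC(x,4),BITC(x,3),BITC(x,2),BITC(x,1),BITC(x,0)
#define BIT_ARG16(x) BITC(x,15),BITC(x,14),BITC(x,13),BITC(x,12),BITC(x,11),BITC(x,10),BITC(x,9),BITC(x,8),BIT_ARG8(x)

int main(void)
{
	/* Capture only received management frames, headers only. */
	unsigned filter = (1 << 1)	/* IPW_PROM_MGMT_HEADER_ONLY */
			| (1 << 4)	/* IPW_PROM_NO_TX   */
			| (1 << 6)	/* IPW_PROM_NO_CTL  */
			| (1 << 8);	/* IPW_PROM_NO_DATA */

	/* The driver would log this value as a binary bitfield: */
	printf("0x%04X = " BIT_FMT16 "\n", filter, BIT_ARG16(filter));
	return 0;	/* prints "0x0152 = 0000-0001:0101-0010" */
}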
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index c2d0b09e0418..b563decf599e 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -201,41 +201,12 @@ static struct {
/* Data types */
/********************************************************************/
-/* Used in Event handling.
- * We avoid nested structures as they break on ARM -- Moustafa */
-struct hermes_tx_descriptor_802_11 {
- /* hermes_tx_descriptor */
- __le16 status;
- __le16 reserved1;
- __le16 reserved2;
- __le32 sw_support;
- u8 retry_count;
- u8 tx_rate;
- __le16 tx_control;
-
- /* ieee80211_hdr */
+/* Beginning of the Tx descriptor, used in TxExc handling */
+struct hermes_txexc_data {
+ struct hermes_tx_descriptor desc;
__le16 frame_ctl;
__le16 duration_id;
u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 addr4[ETH_ALEN];
-
- __le16 data_len;
-
- /* ethhdr */
- u8 h_dest[ETH_ALEN]; /* destination eth addr */
- u8 h_source[ETH_ALEN]; /* source ether addr */
- __be16 h_proto; /* packet type ID field */
-
- /* p8022_hdr */
- u8 dsap;
- u8 ssap;
- u8 ctrl;
- u8 oui[3];
-
- __be16 ethertype;
} __attribute__ ((packed));
/* Rx frame header except compatibility 802.3 header */
@@ -450,53 +421,39 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
hermes_t *hw = &priv->hw;
int err = 0;
u16 txfid = priv->txfid;
- char *p;
struct ethhdr *eh;
- int len, data_len, data_off;
+ int data_off;
struct hermes_tx_descriptor desc;
unsigned long flags;
- TRACE_ENTER(dev->name);
-
if (! netif_running(dev)) {
printk(KERN_ERR "%s: Tx on stopped device!\n",
dev->name);
- TRACE_EXIT(dev->name);
- return 1;
+ return NETDEV_TX_BUSY;
}
if (netif_queue_stopped(dev)) {
printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
dev->name);
- TRACE_EXIT(dev->name);
- return 1;
+ return NETDEV_TX_BUSY;
}
if (orinoco_lock(priv, &flags) != 0) {
printk(KERN_ERR "%s: orinoco_xmit() called while hw_unavailable\n",
dev->name);
- TRACE_EXIT(dev->name);
- return 1;
+ return NETDEV_TX_BUSY;
}
if (! netif_carrier_ok(dev) || (priv->iw_mode == IW_MODE_MONITOR)) {
/* Oops, the firmware hasn't established a connection,
silently drop the packet (this seems to be the
safest approach). */
- stats->tx_errors++;
- orinoco_unlock(priv, &flags);
- dev_kfree_skb(skb);
- TRACE_EXIT(dev->name);
- return 0;
+ goto drop;
}
- /* Length of the packet body */
- /* FIXME: what if the skb is smaller than this? */
- len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN);
- skb = skb_padto(skb, len);
- if (skb == NULL)
- goto fail;
- len -= ETH_HLEN;
+ /* Check packet length */
+ if (skb->len < ETH_HLEN)
+ goto drop;
eh = (struct ethhdr *)skb->data;
@@ -507,8 +464,7 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
if (net_ratelimit())
printk(KERN_ERR "%s: Error %d writing Tx descriptor "
"to BAP\n", dev->name, err);
- stats->tx_errors++;
- goto fail;
+ goto busy;
}
/* Clear the 802.11 header and data length fields - some
@@ -519,50 +475,38 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
/* Encapsulate Ethernet-II frames */
if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
- struct header_struct hdr;
- data_len = len;
- data_off = HERMES_802_3_OFFSET + sizeof(hdr);
- p = skb->data + ETH_HLEN;
-
- /* 802.3 header */
- memcpy(hdr.dest, eh->h_dest, ETH_ALEN);
- memcpy(hdr.src, eh->h_source, ETH_ALEN);
- hdr.len = htons(data_len + ENCAPS_OVERHEAD);
-
- /* 802.2 header */
- memcpy(&hdr.dsap, &encaps_hdr, sizeof(encaps_hdr));
-
- hdr.ethertype = eh->h_proto;
- err = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr),
- txfid, HERMES_802_3_OFFSET);
+ struct header_struct {
+ struct ethhdr eth; /* 802.3 header */
+ u8 encap[6]; /* 802.2 header */
+ } __attribute__ ((packed)) hdr;
+
+ /* Strip destination and source from the data */
+ skb_pull(skb, 2 * ETH_ALEN);
+ data_off = HERMES_802_2_OFFSET + sizeof(encaps_hdr);
+
+ /* And move them to a separate header */
+ memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
+ hdr.eth.h_proto = htons(sizeof(encaps_hdr) + skb->len);
+ memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
+
+ err = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr),
+ txfid, HERMES_802_3_OFFSET);
if (err) {
if (net_ratelimit())
printk(KERN_ERR "%s: Error %d writing packet "
"header to BAP\n", dev->name, err);
- stats->tx_errors++;
- goto fail;
+ goto busy;
}
- /* Actual xfer length - allow for padding */
- len = ALIGN(data_len, 2);
- if (len < ETH_ZLEN - ETH_HLEN)
- len = ETH_ZLEN - ETH_HLEN;
} else { /* IEEE 802.3 frame */
- data_len = len + ETH_HLEN;
data_off = HERMES_802_3_OFFSET;
- p = skb->data;
- /* Actual xfer length - round up for odd length packets */
- len = ALIGN(data_len, 2);
- if (len < ETH_ZLEN)
- len = ETH_ZLEN;
}
- err = hermes_bap_pwrite_pad(hw, USER_BAP, p, data_len, len,
+ err = hermes_bap_pwrite(hw, USER_BAP, skb->data, skb->len,
txfid, data_off);
if (err) {
printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
dev->name, err);
- stats->tx_errors++;
- goto fail;
+ goto busy;
}
/* Finally, we actually initiate the send */
@@ -575,25 +519,27 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
if (net_ratelimit())
printk(KERN_ERR "%s: Error %d transmitting packet\n",
dev->name, err);
- stats->tx_errors++;
- goto fail;
+ goto busy;
}
dev->trans_start = jiffies;
- stats->tx_bytes += data_off + data_len;
+ stats->tx_bytes += data_off + skb->len;
+ goto ok;
- orinoco_unlock(priv, &flags);
+ drop:
+ stats->tx_errors++;
+ stats->tx_dropped++;
+ ok:
+ orinoco_unlock(priv, &flags);
dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
- TRACE_EXIT(dev->name);
-
- return 0;
- fail:
- TRACE_EXIT(dev->name);
-
+ busy:
+ if (err == -EIO)
+ schedule_work(&priv->reset_work);
orinoco_unlock(priv, &flags);
- return err;
+ return NETDEV_TX_BUSY;
}
static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw)
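The reworked Ethernet-II path in orinoco_xmit() above pulls the twelve address bytes off the skb (leaving ethertype plus payload in place) and writes a separate header, the 802.3 addresses and length followed by the six-byte 802.2/SNAP prefix copied from encaps_hdr, ahead of it, so the 802.3 length field covers SNAP(6) + ethertype(2) + payload. A throwaway sketch of that length bookkeeping (the 1000-byte payload is only an example):

#include <stdio.h>

/* Illustration only: length arithmetic for the Ethernet-II re-encapsulation
 * in orinoco_xmit() above; encaps_hdr in orinoco.c is six bytes long. */
int main(void)
{
	unsigned payload  = 1000;	/* hypothetical upper-layer payload       */
	unsigned skb_len  = 2 + payload;	/* ethertype stays after skb_pull() */
	unsigned dot3_len = 6 + skb_len;	/* value stored in hdr.eth.h_proto  */

	printf("802.3 length field = %u (6 SNAP + 2 ethertype + %u payload)\n",
	       dot3_len, payload);
	return 0;
}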
@@ -629,7 +575,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
struct net_device_stats *stats = &priv->stats;
u16 fid = hermes_read_regn(hw, TXCOMPLFID);
u16 status;
- struct hermes_tx_descriptor_802_11 hdr;
+ struct hermes_txexc_data hdr;
int err = 0;
if (fid == DUMMY_FID)
@@ -637,8 +583,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
/* Read part of the frame header - we need status and addr1 */
err = hermes_bap_pread(hw, IRQ_BAP, &hdr,
- offsetof(struct hermes_tx_descriptor_802_11,
- addr2),
+ sizeof(struct hermes_txexc_data),
fid, 0);
hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
@@ -658,7 +603,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
* exceeded, because that's the only status that really mean
* that this particular node went away.
* Other errors means that *we* screwed up. - Jean II */
- status = le16_to_cpu(hdr.status);
+ status = le16_to_cpu(hdr.desc.status);
if (status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) {
union iwreq_data wrqu;
@@ -1398,16 +1343,12 @@ int __orinoco_down(struct net_device *dev)
return 0;
}
-int orinoco_reinit_firmware(struct net_device *dev)
+static int orinoco_allocate_fid(struct net_device *dev)
{
struct orinoco_private *priv = netdev_priv(dev);
struct hermes *hw = &priv->hw;
int err;
- err = hermes_init(hw);
- if (err)
- return err;
-
err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) {
/* Try workaround for old Symbol firmware bug */
@@ -1426,6 +1367,19 @@ int orinoco_reinit_firmware(struct net_device *dev)
return err;
}
+int orinoco_reinit_firmware(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct hermes *hw = &priv->hw;
+ int err;
+
+ err = hermes_init(hw);
+ if (!err)
+ err = orinoco_allocate_fid(dev);
+
+ return err;
+}
+
static int __orinoco_hw_set_bitrate(struct orinoco_private *priv)
{
hermes_t *hw = &priv->hw;
@@ -2272,14 +2226,12 @@ static int orinoco_init(struct net_device *dev)
u16 reclen;
int len;
- TRACE_ENTER(dev->name);
-
/* No need to lock, the hw_unavailable flag is already set in
* alloc_orinocodev() */
priv->nicbuf_size = IEEE80211_FRAME_LEN + ETH_HLEN;
/* Initialize the firmware */
- err = orinoco_reinit_firmware(dev);
+ err = hermes_init(hw);
if (err != 0) {
printk(KERN_ERR "%s: failed to initialize firmware (err = %d)\n",
dev->name, err);
@@ -2337,6 +2289,13 @@ static int orinoco_init(struct net_device *dev)
printk(KERN_DEBUG "%s: Station name \"%s\"\n", dev->name, priv->nick);
+ err = orinoco_allocate_fid(dev);
+ if (err) {
+ printk(KERN_ERR "%s: failed to allocate NIC buffer!\n",
+ dev->name);
+ goto out;
+ }
+
/* Get allowed channels */
err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CHANNELLIST,
&priv->channel_mask);
@@ -2427,7 +2386,6 @@ static int orinoco_init(struct net_device *dev)
printk(KERN_DEBUG "%s: ready\n", dev->name);
out:
- TRACE_EXIT(dev->name);
return err;
}
@@ -2795,8 +2753,6 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
int numrates;
int i, k;
- TRACE_ENTER(dev->name);
-
rrq->length = sizeof(struct iw_range);
memset(range, 0, sizeof(struct iw_range));
@@ -2886,8 +2842,6 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
- TRACE_EXIT(dev->name);
-
return 0;
}
@@ -3069,8 +3023,6 @@ static int orinoco_ioctl_getessid(struct net_device *dev,
int err = 0;
unsigned long flags;
- TRACE_ENTER(dev->name);
-
if (netif_running(dev)) {
err = orinoco_hw_get_essid(priv, &active, essidbuf);
if (err)
@@ -3085,8 +3037,6 @@ static int orinoco_ioctl_getessid(struct net_device *dev,
erq->flags = 1;
erq->length = strlen(essidbuf) + 1;
- TRACE_EXIT(dev->name);
-
return 0;
}
@@ -4347,69 +4297,6 @@ static struct ethtool_ops orinoco_ethtool_ops = {
};
/********************************************************************/
-/* Debugging */
-/********************************************************************/
-
-#if 0
-static void show_rx_frame(struct orinoco_rxframe_hdr *frame)
-{
- printk(KERN_DEBUG "RX descriptor:\n");
- printk(KERN_DEBUG " status = 0x%04x\n", frame->desc.status);
- printk(KERN_DEBUG " time = 0x%08x\n", frame->desc.time);
- printk(KERN_DEBUG " silence = 0x%02x\n", frame->desc.silence);
- printk(KERN_DEBUG " signal = 0x%02x\n", frame->desc.signal);
- printk(KERN_DEBUG " rate = 0x%02x\n", frame->desc.rate);
- printk(KERN_DEBUG " rxflow = 0x%02x\n", frame->desc.rxflow);
- printk(KERN_DEBUG " reserved = 0x%08x\n", frame->desc.reserved);
-
- printk(KERN_DEBUG "IEEE 802.11 header:\n");
- printk(KERN_DEBUG " frame_ctl = 0x%04x\n",
- frame->p80211.frame_ctl);
- printk(KERN_DEBUG " duration_id = 0x%04x\n",
- frame->p80211.duration_id);
- printk(KERN_DEBUG " addr1 = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p80211.addr1[0], frame->p80211.addr1[1],
- frame->p80211.addr1[2], frame->p80211.addr1[3],
- frame->p80211.addr1[4], frame->p80211.addr1[5]);
- printk(KERN_DEBUG " addr2 = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p80211.addr2[0], frame->p80211.addr2[1],
- frame->p80211.addr2[2], frame->p80211.addr2[3],
- frame->p80211.addr2[4], frame->p80211.addr2[5]);
- printk(KERN_DEBUG " addr3 = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p80211.addr3[0], frame->p80211.addr3[1],
- frame->p80211.addr3[2], frame->p80211.addr3[3],
- frame->p80211.addr3[4], frame->p80211.addr3[5]);
- printk(KERN_DEBUG " seq_ctl = 0x%04x\n",
- frame->p80211.seq_ctl);
- printk(KERN_DEBUG " addr4 = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p80211.addr4[0], frame->p80211.addr4[1],
- frame->p80211.addr4[2], frame->p80211.addr4[3],
- frame->p80211.addr4[4], frame->p80211.addr4[5]);
- printk(KERN_DEBUG " data_len = 0x%04x\n",
- frame->p80211.data_len);
-
- printk(KERN_DEBUG "IEEE 802.3 header:\n");
- printk(KERN_DEBUG " dest = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p8023.h_dest[0], frame->p8023.h_dest[1],
- frame->p8023.h_dest[2], frame->p8023.h_dest[3],
- frame->p8023.h_dest[4], frame->p8023.h_dest[5]);
- printk(KERN_DEBUG " src = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p8023.h_source[0], frame->p8023.h_source[1],
- frame->p8023.h_source[2], frame->p8023.h_source[3],
- frame->p8023.h_source[4], frame->p8023.h_source[5]);
- printk(KERN_DEBUG " len = 0x%04x\n", frame->p8023.h_proto);
-
- printk(KERN_DEBUG "IEEE 802.2 LLC/SNAP header:\n");
- printk(KERN_DEBUG " DSAP = 0x%02x\n", frame->p8022.dsap);
- printk(KERN_DEBUG " SSAP = 0x%02x\n", frame->p8022.ssap);
- printk(KERN_DEBUG " ctrl = 0x%02x\n", frame->p8022.ctrl);
- printk(KERN_DEBUG " OUI = %02x:%02x:%02x\n",
- frame->p8022.oui[0], frame->p8022.oui[1], frame->p8022.oui[2]);
- printk(KERN_DEBUG " ethertype = 0x%04x\n", frame->ethertype);
-}
-#endif /* 0 */
-
-/********************************************************************/
/* Module initialization */
/********************************************************************/
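The reworked orinoco_xmit() above follows the standard hard_start_xmit contract: frames that are sent or deliberately dropped are freed and reported as NETDEV_TX_OK, while a transient hardware failure keeps the skb and returns NETDEV_TX_BUSY so the stack requeues it. A minimal sketch of that shape, using made-up example_* names rather than the driver's own helpers:

static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);	/* hypothetical private data */
	int err;

	err = example_hw_send(priv, skb);		/* hypothetical hardware call */
	if (err == -EIO) {
		/* Hardware wedged: schedule a reset and let the stack retry */
		schedule_work(&priv->reset_work);
		return NETDEV_TX_BUSY;
	}
	if (err) {
		/* Unrecoverable for this frame: count it and drop it */
		priv->stats.tx_errors++;
		priv->stats.tx_dropped++;
	} else {
		dev->trans_start = jiffies;
		priv->stats.tx_bytes += skb->len;
	}
	dev_kfree_skb(skb);	/* consumed on both success and drop */
	return NETDEV_TX_OK;
}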
diff --git a/drivers/net/wireless/orinoco.h b/drivers/net/wireless/orinoco.h
index f5d856db92a1..16db3e14b7d2 100644
--- a/drivers/net/wireless/orinoco.h
+++ b/drivers/net/wireless/orinoco.h
@@ -7,7 +7,7 @@
#ifndef _ORINOCO_H
#define _ORINOCO_H
-#define DRIVER_VERSION "0.15rc3"
+#define DRIVER_VERSION "0.15"
#include <linux/netdevice.h>
#include <linux/wireless.h>
@@ -30,20 +30,6 @@ struct orinoco_key {
char data[ORINOCO_MAX_KEY_SIZE];
} __attribute__ ((packed));
-struct header_struct {
- /* 802.3 */
- u8 dest[ETH_ALEN];
- u8 src[ETH_ALEN];
- __be16 len;
- /* 802.2 */
- u8 dsap;
- u8 ssap;
- u8 ctrl;
- /* SNAP */
- u8 oui[3];
- unsigned short ethertype;
-} __attribute__ ((packed));
-
typedef enum {
FIRMWARE_TYPE_AGERE,
FIRMWARE_TYPE_INTERSIL,
@@ -132,9 +118,6 @@ extern int orinoco_debug;
#define DEBUG(n, args...) do { } while (0)
#endif /* ORINOCO_DEBUG */
-#define TRACE_ENTER(devname) DEBUG(2, "%s: -> %s()\n", devname, __FUNCTION__);
-#define TRACE_EXIT(devname) DEBUG(2, "%s: <- %s()\n", devname, __FUNCTION__);
-
/********************************************************************/
/* Exported prototypes */
/********************************************************************/
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index 434f7d7ad841..b2aec4d9fbb1 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -147,14 +147,11 @@ static void orinoco_cs_detach(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ if (link->dev_node)
+ unregister_netdev(dev);
+
orinoco_cs_release(link);
- DEBUG(0, PFX "detach: link=%p link->dev_node=%p\n", link, link->dev_node);
- if (link->dev_node) {
- DEBUG(0, PFX "About to unregister net device %p\n",
- dev);
- unregister_netdev(dev);
- }
free_orinocodev(dev);
} /* orinoco_cs_detach */
@@ -178,13 +175,10 @@ orinoco_cs_config(struct pcmcia_device *link)
int last_fn, last_ret;
u_char buf[64];
config_info_t conf;
- cisinfo_t info;
tuple_t tuple;
cisparse_t parse;
void __iomem *mem;
- CS_CHECK(ValidateCIS, pcmcia_validate_cis(link, &info));
-
/*
* This reads the card's CONFIG tuple to find its
* configuration registers.
@@ -234,12 +228,6 @@ orinoco_cs_config(struct pcmcia_device *link)
goto next_entry;
link->conf.ConfigIndex = cfg->index;
- /* Does this card need audio output? */
- if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
- link->conf.Attributes |= CONF_ENABLE_SPKR;
- link->conf.Status = CCSR_AUDIO_ENA;
- }
-
/* Use power settings for Vcc and Vpp if present */
/* Note that the CIS values need to be rescaled */
if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
@@ -355,19 +343,10 @@ orinoco_cs_config(struct pcmcia_device *link)
net_device has been registered */
/* Finally, report what we've done */
- printk(KERN_DEBUG "%s: index 0x%02x: ",
- dev->name, link->conf.ConfigIndex);
- if (link->conf.Vpp)
- printk(", Vpp %d.%d", link->conf.Vpp / 10,
- link->conf.Vpp % 10);
- printk(", irq %d", link->irq.AssignedIRQ);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1 + link->io.NumPorts1 - 1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2 + link->io.NumPorts2 - 1);
- printk("\n");
+ printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io "
+ "0x%04x-0x%04x\n", dev->name, dev->class_dev.dev->bus_id,
+ link->irq.AssignedIRQ, link->io.BasePort1,
+ link->io.BasePort1 + link->io.NumPorts1 - 1);
return 0;
@@ -436,7 +415,6 @@ static int orinoco_cs_resume(struct pcmcia_device *link)
struct orinoco_private *priv = netdev_priv(dev);
struct orinoco_pccard *card = priv->card;
int err = 0;
- unsigned long flags;
if (! test_bit(0, &card->hard_reset_in_progress)) {
err = orinoco_reinit_firmware(dev);
@@ -446,7 +424,7 @@ static int orinoco_cs_resume(struct pcmcia_device *link)
return -EIO;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&priv->lock);
netif_device_attach(dev);
priv->hw_unavailable--;
@@ -458,10 +436,10 @@ static int orinoco_cs_resume(struct pcmcia_device *link)
dev->name, err);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->lock);
}
- return 0;
+ return err;
}
diff --git a/drivers/net/wireless/orinoco_nortel.c b/drivers/net/wireless/orinoco_nortel.c
index d1a670b35338..74b9d5b2ba9e 100644
--- a/drivers/net/wireless/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco_nortel.c
@@ -1,9 +1,8 @@
/* orinoco_nortel.c
- *
+ *
* Driver for Prism II devices which would usually be driven by orinoco_cs,
* but are connected to the PCI bus by a PCI-to-PCMCIA adapter used in
* Nortel emobility, Symbol LA-4113 and Symbol LA-4123.
- * but are connected to the PCI bus by a Nortel PCI-PCMCIA-Adapter.
*
* Copyright (C) 2002 Tobias Hoffmann
* (C) 2003 Christoph Jungegger <disdos@traum404.de>
@@ -50,67 +49,62 @@
#include <pcmcia/cisreg.h>
#include "orinoco.h"
+#include "orinoco_pci.h"
#define COR_OFFSET (0xe0) /* COR attribute offset of Prism2 PC card */
#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
-/* Nortel specific data */
-struct nortel_pci_card {
- unsigned long iobase1;
- unsigned long iobase2;
-};
-
/*
- * Do a soft reset of the PCI card using the Configuration Option Register
+ * Do a soft reset of the card using the Configuration Option Register
* We need this to get going...
* This is the part of the code that is strongly inspired by wlan-ng
*
* Note bis: Don't try to access HERMES_CMD during the reset phase.
* It just won't work!
*/
-static int nortel_pci_cor_reset(struct orinoco_private *priv)
+static int orinoco_nortel_cor_reset(struct orinoco_private *priv)
{
- struct nortel_pci_card *card = priv->card;
+ struct orinoco_pci_card *card = priv->card;
- /* Assert the reset until the card notice */
- outw_p(8, card->iobase1 + 2);
- inw(card->iobase2 + COR_OFFSET);
- outw_p(0x80, card->iobase2 + COR_OFFSET);
+ /* Assert the reset until the card notices */
+ iowrite16(8, card->bridge_io + 2);
+ ioread16(card->attr_io + COR_OFFSET);
+ iowrite16(0x80, card->attr_io + COR_OFFSET);
mdelay(1);
/* Give time for the card to recover from this hard effort */
- outw_p(0, card->iobase2 + COR_OFFSET);
- outw_p(0, card->iobase2 + COR_OFFSET);
+ iowrite16(0, card->attr_io + COR_OFFSET);
+ iowrite16(0, card->attr_io + COR_OFFSET);
mdelay(1);
- /* set COR as usual */
- outw_p(COR_VALUE, card->iobase2 + COR_OFFSET);
- outw_p(COR_VALUE, card->iobase2 + COR_OFFSET);
+ /* Set COR as usual */
+ iowrite16(COR_VALUE, card->attr_io + COR_OFFSET);
+ iowrite16(COR_VALUE, card->attr_io + COR_OFFSET);
mdelay(1);
- outw_p(0x228, card->iobase1 + 2);
+ iowrite16(0x228, card->bridge_io + 2);
return 0;
}
-static int nortel_pci_hw_init(struct nortel_pci_card *card)
+static int orinoco_nortel_hw_init(struct orinoco_pci_card *card)
{
int i;
u32 reg;
- /* setup bridge */
- if (inw(card->iobase1) & 1) {
+ /* Setup bridge */
+ if (ioread16(card->bridge_io) & 1) {
printk(KERN_ERR PFX "brg1 answer1 wrong\n");
return -EBUSY;
}
- outw_p(0x118, card->iobase1 + 2);
- outw_p(0x108, card->iobase1 + 2);
+ iowrite16(0x118, card->bridge_io + 2);
+ iowrite16(0x108, card->bridge_io + 2);
mdelay(30);
- outw_p(0x8, card->iobase1 + 2);
+ iowrite16(0x8, card->bridge_io + 2);
for (i = 0; i < 30; i++) {
mdelay(30);
- if (inw(card->iobase1) & 0x10) {
+ if (ioread16(card->bridge_io) & 0x10) {
break;
}
}
@@ -118,42 +112,42 @@ static int nortel_pci_hw_init(struct nortel_pci_card *card)
printk(KERN_ERR PFX "brg1 timed out\n");
return -EBUSY;
}
- if (inw(card->iobase2 + 0xe0) & 1) {
+ if (ioread16(card->attr_io + COR_OFFSET) & 1) {
printk(KERN_ERR PFX "brg2 answer1 wrong\n");
return -EBUSY;
}
- if (inw(card->iobase2 + 0xe2) & 1) {
+ if (ioread16(card->attr_io + COR_OFFSET + 2) & 1) {
printk(KERN_ERR PFX "brg2 answer2 wrong\n");
return -EBUSY;
}
- if (inw(card->iobase2 + 0xe4) & 1) {
+ if (ioread16(card->attr_io + COR_OFFSET + 4) & 1) {
printk(KERN_ERR PFX "brg2 answer3 wrong\n");
return -EBUSY;
}
- /* set the PCMCIA COR-Register */
- outw_p(COR_VALUE, card->iobase2 + COR_OFFSET);
+ /* Set the PCMCIA COR register */
+ iowrite16(COR_VALUE, card->attr_io + COR_OFFSET);
mdelay(1);
- reg = inw(card->iobase2 + COR_OFFSET);
+ reg = ioread16(card->attr_io + COR_OFFSET);
if (reg != COR_VALUE) {
printk(KERN_ERR PFX "Error setting COR value (reg=%x)\n",
reg);
return -EBUSY;
}
- /* set leds */
- outw_p(1, card->iobase1 + 10);
+ /* Set LEDs */
+ iowrite16(1, card->bridge_io + 10);
return 0;
}
-static int nortel_pci_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int orinoco_nortel_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int err;
struct orinoco_private *priv;
- struct nortel_pci_card *card;
+ struct orinoco_pci_card *card;
struct net_device *dev;
- void __iomem *iomem;
+ void __iomem *hermes_io, *bridge_io, *attr_io;
err = pci_enable_device(pdev);
if (err) {
@@ -162,19 +156,34 @@ static int nortel_pci_init_one(struct pci_dev *pdev,
}
err = pci_request_regions(pdev, DRIVER_NAME);
- if (err != 0) {
+ if (err) {
printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
goto fail_resources;
}
- iomem = pci_iomap(pdev, 2, 0);
- if (!iomem) {
- err = -ENOMEM;
- goto fail_map_io;
+ bridge_io = pci_iomap(pdev, 0, 0);
+ if (!bridge_io) {
+ printk(KERN_ERR PFX "Cannot map bridge registers\n");
+ err = -EIO;
+ goto fail_map_bridge;
+ }
+
+ attr_io = pci_iomap(pdev, 1, 0);
+ if (!attr_io) {
+ printk(KERN_ERR PFX "Cannot map PCMCIA attributes\n");
+ err = -EIO;
+ goto fail_map_attr;
+ }
+
+ hermes_io = pci_iomap(pdev, 2, 0);
+ if (!hermes_io) {
+ printk(KERN_ERR PFX "Cannot map chipset registers\n");
+ err = -EIO;
+ goto fail_map_hermes;
}
/* Allocate network device */
- dev = alloc_orinocodev(sizeof(*card), nortel_pci_cor_reset);
+ dev = alloc_orinocodev(sizeof(*card), orinoco_nortel_cor_reset);
if (!dev) {
printk(KERN_ERR PFX "Cannot allocate network device\n");
err = -ENOMEM;
@@ -183,16 +192,12 @@ static int nortel_pci_init_one(struct pci_dev *pdev,
priv = netdev_priv(dev);
card = priv->card;
- card->iobase1 = pci_resource_start(pdev, 0);
- card->iobase2 = pci_resource_start(pdev, 1);
- dev->base_addr = pci_resource_start(pdev, 2);
+ card->bridge_io = bridge_io;
+ card->attr_io = attr_io;
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- hermes_struct_init(&priv->hw, iomem, HERMES_16BIT_REGSPACING);
-
- printk(KERN_DEBUG PFX "Detected Nortel PCI device at %s irq:%d, "
- "io addr:0x%lx\n", pci_name(pdev), pdev->irq, dev->base_addr);
+ hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING);
err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
dev->name, dev);
@@ -201,21 +206,19 @@ static int nortel_pci_init_one(struct pci_dev *pdev,
err = -EBUSY;
goto fail_irq;
}
- dev->irq = pdev->irq;
- err = nortel_pci_hw_init(card);
+ err = orinoco_nortel_hw_init(card);
if (err) {
printk(KERN_ERR PFX "Hardware initialization failed\n");
goto fail;
}
- err = nortel_pci_cor_reset(priv);
+ err = orinoco_nortel_cor_reset(priv);
if (err) {
printk(KERN_ERR PFX "Initial reset failed\n");
goto fail;
}
-
err = register_netdev(dev);
if (err) {
printk(KERN_ERR PFX "Cannot register network device\n");
@@ -223,6 +226,8 @@ static int nortel_pci_init_one(struct pci_dev *pdev,
}
pci_set_drvdata(pdev, dev);
+ printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s\n", dev->name,
+ pci_name(pdev));
return 0;
@@ -234,9 +239,15 @@ static int nortel_pci_init_one(struct pci_dev *pdev,
free_orinocodev(dev);
fail_alloc:
- pci_iounmap(pdev, iomem);
+ pci_iounmap(pdev, hermes_io);
- fail_map_io:
+ fail_map_hermes:
+ pci_iounmap(pdev, attr_io);
+
+ fail_map_attr:
+ pci_iounmap(pdev, bridge_io);
+
+ fail_map_bridge:
pci_release_regions(pdev);
fail_resources:
@@ -245,26 +256,27 @@ static int nortel_pci_init_one(struct pci_dev *pdev,
return err;
}
-static void __devexit nortel_pci_remove_one(struct pci_dev *pdev)
+static void __devexit orinoco_nortel_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct orinoco_private *priv = netdev_priv(dev);
- struct nortel_pci_card *card = priv->card;
+ struct orinoco_pci_card *card = priv->card;
- /* clear leds */
- outw_p(0, card->iobase1 + 10);
+ /* Clear LEDs */
+ iowrite16(0, card->bridge_io + 10);
unregister_netdev(dev);
- free_irq(dev->irq, dev);
+ free_irq(pdev->irq, dev);
pci_set_drvdata(pdev, NULL);
free_orinocodev(dev);
pci_iounmap(pdev, priv->hw.iobase);
+ pci_iounmap(pdev, card->attr_io);
+ pci_iounmap(pdev, card->bridge_io);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
-
-static struct pci_device_id nortel_pci_id_table[] = {
+static struct pci_device_id orinoco_nortel_id_table[] = {
/* Nortel emobility PCI */
{0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,},
/* Symbol LA-4123 PCI */
@@ -272,13 +284,15 @@ static struct pci_device_id nortel_pci_id_table[] = {
{0,},
};
-MODULE_DEVICE_TABLE(pci, nortel_pci_id_table);
+MODULE_DEVICE_TABLE(pci, orinoco_nortel_id_table);
-static struct pci_driver nortel_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = nortel_pci_id_table,
- .probe = nortel_pci_init_one,
- .remove = __devexit_p(nortel_pci_remove_one),
+static struct pci_driver orinoco_nortel_driver = {
+ .name = DRIVER_NAME,
+ .id_table = orinoco_nortel_id_table,
+ .probe = orinoco_nortel_init_one,
+ .remove = __devexit_p(orinoco_nortel_remove_one),
+ .suspend = orinoco_pci_suspend,
+ .resume = orinoco_pci_resume,
};
static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
@@ -288,20 +302,19 @@ MODULE_DESCRIPTION
("Driver for wireless LAN cards using the Nortel PCI bridge");
MODULE_LICENSE("Dual MPL/GPL");
-static int __init nortel_pci_init(void)
+static int __init orinoco_nortel_init(void)
{
printk(KERN_DEBUG "%s\n", version);
- return pci_module_init(&nortel_pci_driver);
+ return pci_module_init(&orinoco_nortel_driver);
}
-static void __exit nortel_pci_exit(void)
+static void __exit orinoco_nortel_exit(void)
{
- pci_unregister_driver(&nortel_pci_driver);
- ssleep(1);
+ pci_unregister_driver(&orinoco_nortel_driver);
}
-module_init(nortel_pci_init);
-module_exit(nortel_pci_exit);
+module_init(orinoco_nortel_init);
+module_exit(orinoco_nortel_exit);
/*
* Local variables:
diff --git a/drivers/net/wireless/orinoco_pci.c b/drivers/net/wireless/orinoco_pci.c
index 5362c214fc8e..1c105f40f8d5 100644
--- a/drivers/net/wireless/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco_pci.c
@@ -1,11 +1,11 @@
/* orinoco_pci.c
*
- * Driver for Prism II devices that have a direct PCI interface
- * (i.e., not in a Pcmcia or PLX bridge)
- *
- * Specifically here we're talking about the Linksys WMP11
+ * Driver for Prism 2.5/3 devices that have a direct PCI interface
+ * (i.e. these are not PCMCIA cards in a PCMCIA-to-PCI bridge).
+ * The card contains only one PCI region, which contains all the usual
+ * hermes registers, as well as the COR register.
*
- * Current maintainers (as of 29 September 2003) are:
+ * Current maintainers are:
* Pavel Roskin <proski AT gnu.org>
* and David Gibson <hermes AT gibson.dropbear.id.au>
*
@@ -41,54 +41,6 @@
* under either the MPL or the GPL.
*/
-/*
- * Theory of operation...
- * -------------------
- * Maybe you had a look in orinoco_plx. Well, this is totally different...
- *
- * The card contains only one PCI region, which contains all the usual
- * hermes registers.
- *
- * The driver will memory map this region in normal memory. Because
- * the hermes registers are mapped in normal memory and not in ISA I/O
- * post space, we can't use the usual inw/outw macros and we need to
- * use readw/writew.
- * This slight difference force us to compile our own version of
- * hermes.c with the register access macro changed. That's a bit
- * hackish but works fine.
- *
- * Note that the PCI region is pretty big (4K). That's much more than
- * the usual set of hermes register (0x0 -> 0x3E). I've got a strong
- * suspicion that the whole memory space of the adapter is in fact in
- * this region. Accessing directly the adapter memory instead of going
- * through the usual register would speed up significantely the
- * operations...
- *
- * Finally, the card looks like this :
------------------------
- Bus 0, device 14, function 0:
- Network controller: PCI device 1260:3873 (Harris Semiconductor) (rev 1).
- IRQ 11.
- Master Capable. Latency=248.
- Prefetchable 32 bit memory at 0xffbcc000 [0xffbccfff].
------------------------
-00:0e.0 Network controller: Harris Semiconductor: Unknown device 3873 (rev 01)
- Subsystem: Unknown device 1737:3874
- Control: I/O+ Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR- FastB2B-
- Status: Cap+ 66Mhz- UDF- FastB2B+ ParErr- DEVSEL=medium >TAbort- <TAbort- <MAbort- >SERR- <PERR-
- Latency: 248 set, cache line size 08
- Interrupt: pin A routed to IRQ 11
- Region 0: Memory at ffbcc000 (32-bit, prefetchable) [size=4K]
- Capabilities: [dc] Power Management version 2
- Flags: PMEClk- AuxPwr- DSI- D1+ D2+ PME+
- Status: D0 PME-Enable- DSel=0 DScale=0 PME-
------------------------
- *
- * That's all..
- *
- * Jean II
- */
-
#define DRIVER_NAME "orinoco_pci"
#define PFX DRIVER_NAME ": "
@@ -100,12 +52,14 @@
#include <linux/pci.h>
#include "orinoco.h"
+#include "orinoco_pci.h"
-/* All the magic there is from wlan-ng */
-/* Magic offset of the reset register of the PCI card */
+/* Offset of the COR register of the PCI card */
#define HERMES_PCI_COR (0x26)
-/* Magic bitmask to reset the card */
+
+/* Bitmask to reset the card */
#define HERMES_PCI_COR_MASK (0x0080)
+
/* Magic timeouts for doing the reset.
* Those times are straight from wlan-ng, and it is claimed that they
* are necessary. Alan will kill me. Take your time and grab a coffee. */
@@ -113,13 +67,8 @@
#define HERMES_PCI_COR_OFFT (500) /* ms */
#define HERMES_PCI_COR_BUSYT (500) /* ms */
-/* Orinoco PCI specific data */
-struct orinoco_pci_card {
- void __iomem *pci_ioaddr;
-};
-
/*
- * Do a soft reset of the PCI card using the Configuration Option Register
+ * Do a soft reset of the card using the Configuration Option Register
* We need this to get going...
* This is the part of the code that is strongly inspired by wlan-ng
*
@@ -131,14 +80,13 @@ struct orinoco_pci_card {
* Note bis: Don't try to access HERMES_CMD during the reset phase.
* It just won't work!
*/
-static int
-orinoco_pci_cor_reset(struct orinoco_private *priv)
+static int orinoco_pci_cor_reset(struct orinoco_private *priv)
{
hermes_t *hw = &priv->hw;
- unsigned long timeout;
- u16 reg;
+ unsigned long timeout;
+ u16 reg;
- /* Assert the reset until the card notice */
+ /* Assert the reset until the card notices */
hermes_write_regn(hw, PCI_COR, HERMES_PCI_COR_MASK);
mdelay(HERMES_PCI_COR_ONT);
@@ -163,19 +111,14 @@ orinoco_pci_cor_reset(struct orinoco_private *priv)
return 0;
}
-/*
- * Initialise a card. Mostly similar to PLX code.
- */
static int orinoco_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int err = 0;
- unsigned long pci_iorange;
- u16 __iomem *pci_ioaddr = NULL;
- unsigned long pci_iolen;
- struct orinoco_private *priv = NULL;
+ int err;
+ struct orinoco_private *priv;
struct orinoco_pci_card *card;
- struct net_device *dev = NULL;
+ struct net_device *dev;
+ void __iomem *hermes_io;
err = pci_enable_device(pdev);
if (err) {
@@ -184,39 +127,32 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
}
err = pci_request_regions(pdev, DRIVER_NAME);
- if (err != 0) {
+ if (err) {
printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
goto fail_resources;
}
- /* Resource 0 is mapped to the hermes registers */
- pci_iorange = pci_resource_start(pdev, 0);
- pci_iolen = pci_resource_len(pdev, 0);
- pci_ioaddr = ioremap(pci_iorange, pci_iolen);
- if (!pci_iorange) {
- printk(KERN_ERR PFX "Cannot remap hardware registers\n");
- goto fail_map;
+ hermes_io = pci_iomap(pdev, 0, 0);
+ if (!hermes_io) {
+ printk(KERN_ERR PFX "Cannot remap chipset registers\n");
+ err = -EIO;
+ goto fail_map_hermes;
}
/* Allocate network device */
dev = alloc_orinocodev(sizeof(*card), orinoco_pci_cor_reset);
- if (! dev) {
+ if (!dev) {
+ printk(KERN_ERR PFX "Cannot allocate network device\n");
err = -ENOMEM;
goto fail_alloc;
}
priv = netdev_priv(dev);
card = priv->card;
- card->pci_ioaddr = pci_ioaddr;
- dev->mem_start = pci_iorange;
- dev->mem_end = pci_iorange + pci_iolen - 1;
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- hermes_struct_init(&priv->hw, pci_ioaddr, HERMES_32BIT_REGSPACING);
-
- printk(KERN_DEBUG PFX "Detected device %s, mem:0x%lx-0x%lx, irq %d\n",
- pci_name(pdev), dev->mem_start, dev->mem_end, pdev->irq);
+ hermes_struct_init(&priv->hw, hermes_io, HERMES_32BIT_REGSPACING);
err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
dev->name, dev);
@@ -225,9 +161,7 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
err = -EBUSY;
goto fail_irq;
}
- dev->irq = pdev->irq;
- /* Perform a COR reset to start the card */
err = orinoco_pci_cor_reset(priv);
if (err) {
printk(KERN_ERR PFX "Initial reset failed\n");
@@ -236,11 +170,13 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR PFX "Failed to register net device\n");
+ printk(KERN_ERR PFX "Cannot register network device\n");
goto fail;
}
pci_set_drvdata(pdev, dev);
+ printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s\n", dev->name,
+ pci_name(pdev));
return 0;
@@ -252,9 +188,9 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
free_orinocodev(dev);
fail_alloc:
- iounmap(pci_ioaddr);
+ pci_iounmap(pdev, hermes_io);
- fail_map:
+ fail_map_hermes:
pci_release_regions(pdev);
fail_resources:
@@ -267,87 +203,17 @@ static void __devexit orinoco_pci_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct orinoco_private *priv = netdev_priv(dev);
- struct orinoco_pci_card *card = priv->card;
unregister_netdev(dev);
- free_irq(dev->irq, dev);
+ free_irq(pdev->irq, dev);
pci_set_drvdata(pdev, NULL);
free_orinocodev(dev);
- iounmap(card->pci_ioaddr);
+ pci_iounmap(pdev, priv->hw.iobase);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
-static int orinoco_pci_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct orinoco_private *priv = netdev_priv(dev);
- unsigned long flags;
- int err;
-
-
- err = orinoco_lock(priv, &flags);
- if (err) {
- printk(KERN_ERR "%s: hw_unavailable on orinoco_pci_suspend\n",
- dev->name);
- return err;
- }
-
- err = __orinoco_down(dev);
- if (err)
- printk(KERN_WARNING "%s: orinoco_pci_suspend(): Error %d downing interface\n",
- dev->name, err);
-
- netif_device_detach(dev);
-
- priv->hw_unavailable++;
-
- orinoco_unlock(priv, &flags);
-
- pci_save_state(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
-
- return 0;
-}
-
-static int orinoco_pci_resume(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct orinoco_private *priv = netdev_priv(dev);
- unsigned long flags;
- int err;
-
- printk(KERN_DEBUG "%s: Orinoco-PCI waking up\n", dev->name);
-
- pci_set_power_state(pdev, 0);
- pci_restore_state(pdev);
-
- err = orinoco_reinit_firmware(dev);
- if (err) {
- printk(KERN_ERR "%s: Error %d re-initializing firmware on orinoco_pci_resume()\n",
- dev->name, err);
- return err;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- netif_device_attach(dev);
-
- priv->hw_unavailable--;
-
- if (priv->open && (! priv->hw_unavailable)) {
- err = __orinoco_up(dev);
- if (err)
- printk(KERN_ERR "%s: Error %d restarting card on orinoco_pci_resume()\n",
- dev->name, err);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-static struct pci_device_id orinoco_pci_pci_id_table[] = {
+static struct pci_device_id orinoco_pci_id_table[] = {
/* Intersil Prism 3 */
{0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,},
/* Intersil Prism 2.5 */
@@ -357,11 +223,11 @@ static struct pci_device_id orinoco_pci_pci_id_table[] = {
{0,},
};
-MODULE_DEVICE_TABLE(pci, orinoco_pci_pci_id_table);
+MODULE_DEVICE_TABLE(pci, orinoco_pci_id_table);
static struct pci_driver orinoco_pci_driver = {
.name = DRIVER_NAME,
- .id_table = orinoco_pci_pci_id_table,
+ .id_table = orinoco_pci_id_table,
.probe = orinoco_pci_init_one,
.remove = __devexit_p(orinoco_pci_remove_one),
.suspend = orinoco_pci_suspend,
diff --git a/drivers/net/wireless/orinoco_pci.h b/drivers/net/wireless/orinoco_pci.h
new file mode 100644
index 000000000000..7eb1e08113e0
--- /dev/null
+++ b/drivers/net/wireless/orinoco_pci.h
@@ -0,0 +1,104 @@
+/* orinoco_pci.h
+ *
+ * Common code for all Orinoco drivers for PCI devices, including
+ * both native PCI and PCMCIA-to-PCI bridges.
+ *
+ * Copyright (C) 2005, Pavel Roskin.
+ * See orinoco.c for license.
+ */
+
+#ifndef _ORINOCO_PCI_H
+#define _ORINOCO_PCI_H
+
+#include <linux/netdevice.h>
+
+/* Driver specific data */
+struct orinoco_pci_card {
+ void __iomem *bridge_io;
+ void __iomem *attr_io;
+};
+
+#ifdef CONFIG_PM
+static int orinoco_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct orinoco_private *priv = netdev_priv(dev);
+ unsigned long flags;
+ int err;
+
+ err = orinoco_lock(priv, &flags);
+ if (err) {
+ printk(KERN_ERR "%s: cannot lock hardware for suspend\n",
+ dev->name);
+ return err;
+ }
+
+ err = __orinoco_down(dev);
+ if (err)
+ printk(KERN_WARNING "%s: error %d bringing interface down "
+ "for suspend\n", dev->name, err);
+
+ netif_device_detach(dev);
+
+ priv->hw_unavailable++;
+
+ orinoco_unlock(priv, &flags);
+
+ free_irq(pdev->irq, dev);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int orinoco_pci_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct orinoco_private *priv = netdev_priv(dev);
+ unsigned long flags;
+ int err;
+
+ pci_set_power_state(pdev, 0);
+ pci_enable_device(pdev);
+ pci_restore_state(pdev);
+
+ err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
+ dev->name, dev);
+ if (err) {
+ printk(KERN_ERR "%s: cannot re-allocate IRQ on resume\n",
+ dev->name);
+ pci_disable_device(pdev);
+ return -EBUSY;
+ }
+
+ err = orinoco_reinit_firmware(dev);
+ if (err) {
+ printk(KERN_ERR "%s: error %d re-initializing firmware "
+ "on resume\n", dev->name, err);
+ return err;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ netif_device_attach(dev);
+
+ priv->hw_unavailable--;
+
+ if (priv->open && (! priv->hw_unavailable)) {
+ err = __orinoco_up(dev);
+ if (err)
+ printk(KERN_ERR "%s: Error %d restarting card on resume\n",
+ dev->name, err);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+#else
+#define orinoco_pci_suspend NULL
+#define orinoco_pci_resume NULL
+#endif
+
+#endif /* _ORINOCO_PCI_H */
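Since the new header defines orinoco_pci_suspend()/orinoco_pci_resume() only under CONFIG_PM and turns them into NULL macros otherwise, a bridge driver just includes it and points its pci_driver at the shared handlers, as the orinoco_nortel, orinoco_plx and orinoco_tmd hunks below do. A sketch of the expected usage (everything except the orinoco_pci_* names is a placeholder):

#include "orinoco_pci.h"

static struct pci_driver example_bridge_driver = {
	.name		= DRIVER_NAME,
	.id_table	= example_id_table,
	.probe		= example_init_one,
	.remove		= __devexit_p(example_remove_one),
	/* Resolve to NULL when CONFIG_PM is off, so this builds either way */
	.suspend	= orinoco_pci_suspend,
	.resume		= orinoco_pci_resume,
};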
diff --git a/drivers/net/wireless/orinoco_plx.c b/drivers/net/wireless/orinoco_plx.c
index 210e73776545..84f696c77551 100644
--- a/drivers/net/wireless/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco_plx.c
@@ -3,7 +3,7 @@
* Driver for Prism II devices which would usually be driven by orinoco_cs,
* but are connected to the PCI bus by a PLX9052.
*
- * Current maintainers (as of 29 September 2003) are:
+ * Current maintainers are:
* Pavel Roskin <proski AT gnu.org>
* and David Gibson <hermes AT gibson.dropbear.id.au>
*
@@ -30,38 +30,18 @@
* other provisions required by the GPL. If you do not delete the
* provisions above, a recipient may use your version of this file
* under either the MPL or the GPL.
-
- * Caution: this is experimental and probably buggy. For success and
- * failure reports for different cards and adaptors, see
- * orinoco_plx_pci_id_table near the end of the file. If you have a
- * card we don't have the PCI id for, and looks like it should work,
- * drop me mail with the id and "it works"/"it doesn't work".
- *
- * Note: if everything gets detected fine but it doesn't actually send
- * or receive packets, your first port of call should probably be to
- * try newer firmware in the card. Especially if you're doing Ad-Hoc
- * modes.
- *
- * The actual driving is done by orinoco.c, this is just resource
- * allocation stuff. The explanation below is courtesy of Ryan Niemi
- * on the linux-wlan-ng list at
- * http://archives.neohapsis.com/archives/dev/linux-wlan/2001-q1/0026.html
*
- * The PLX9052-based cards (WL11000 and several others) are a
- * different beast than the usual PCMCIA-based PRISM2 configuration
- * expected by wlan-ng. Here's the general details on how the WL11000
- * PCI adapter works:
+ * Here are the general details on how the PLX9052 adapter works:
*
* - Two PCI I/O address spaces, one 0x80 long which contains the
* PLX9052 registers, and one that's 0x40 long mapped to the PCMCIA
* slot I/O address space.
*
- * - One PCI memory address space, mapped to the PCMCIA memory space
+ * - One PCI memory address space, mapped to the PCMCIA attribute space
* (containing the CIS).
*
- * After identifying the I/O and memory space, you can read through
- * the memory space to confirm the CIS's device ID or manufacturer ID
- * to make sure it's the expected card. qKeep in mind that the PCMCIA
+ * Using the latter, you can read through the CIS data to make sure the
+ * card is compatible with the driver. Keep in mind that the PCMCIA
* spec specifies the CIS as the lower 8 bits of each word read from
* the CIS, so to read the bytes of the CIS, read every other byte
* (0,2,4,...). Passing that test, you need to enable the I/O address
@@ -71,7 +51,7 @@
* within the PCI memory space. Write 0x41 to the COR register to
* enable I/O mode and to select level triggered interrupts. To
* confirm you actually succeeded, read the COR register back and make
- * sure it actually got set to 0x41, incase you have an unexpected
+ * sure it actually got set to 0x41, in case you have an unexpected
* card inserted.
*
* Following that, you can treat the second PCI I/O address space (the
@@ -101,16 +81,6 @@
* that, I've hot-swapped a number of times during debugging and
* driver development for various reasons (stuck WAIT# line after the
* radio card's firmware locks up).
- *
- * Hope this is enough info for someone to add PLX9052 support to the
- * wlan-ng card. In the case of the WL11000, the PCI ID's are
- * 0x1639/0x0200, with matching subsystem ID's. Other PLX9052-based
- * manufacturers other than Eumitcom (or on cards other than the
- * WL11000) may have different PCI ID's.
- *
- * If anyone needs any more specific info, let me know. I haven't had
- * time to implement support myself yet, and with the way things are
- * going, might not have time for a while..
*/
#define DRIVER_NAME "orinoco_plx"
@@ -125,6 +95,7 @@
#include <pcmcia/cisreg.h>
#include "orinoco.h"
+#include "orinoco_pci.h"
#define COR_OFFSET (0x3e0) /* COR attribute offset of Prism2 PC card */
#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
@@ -134,30 +105,20 @@
#define PLX_INTCSR 0x4c /* Interrupt Control & Status Register */
#define PLX_INTCSR_INTEN (1<<6) /* Interrupt Enable bit */
-static const u8 cis_magic[] = {
- 0x01, 0x03, 0x00, 0x00, 0xff, 0x17, 0x04, 0x67
-};
-
-/* Orinoco PLX specific data */
-struct orinoco_plx_card {
- void __iomem *attr_mem;
-};
-
/*
* Do a soft reset of the card using the Configuration Option Register
*/
static int orinoco_plx_cor_reset(struct orinoco_private *priv)
{
hermes_t *hw = &priv->hw;
- struct orinoco_plx_card *card = priv->card;
- u8 __iomem *attr_mem = card->attr_mem;
+ struct orinoco_pci_card *card = priv->card;
unsigned long timeout;
u16 reg;
- writeb(COR_VALUE | COR_RESET, attr_mem + COR_OFFSET);
+ iowrite8(COR_VALUE | COR_RESET, card->attr_io + COR_OFFSET);
mdelay(1);
- writeb(COR_VALUE, attr_mem + COR_OFFSET);
+ iowrite8(COR_VALUE, card->attr_io + COR_OFFSET);
mdelay(1);
/* Just in case, wait more until the card is no longer busy */
@@ -168,7 +129,7 @@ static int orinoco_plx_cor_reset(struct orinoco_private *priv)
reg = hermes_read_regn(hw, CMD);
}
- /* Did we timeout ? */
+ /* Still busy? */
if (reg & HERMES_CMD_BUSY) {
printk(KERN_ERR PFX "Busy timeout\n");
return -ETIMEDOUT;
@@ -177,20 +138,55 @@ static int orinoco_plx_cor_reset(struct orinoco_private *priv)
return 0;
}
+static int orinoco_plx_hw_init(struct orinoco_pci_card *card)
+{
+ int i;
+ u32 csr_reg;
+ static const u8 cis_magic[] = {
+ 0x01, 0x03, 0x00, 0x00, 0xff, 0x17, 0x04, 0x67
+ };
+
+ printk(KERN_DEBUG PFX "CIS: ");
+ for (i = 0; i < 16; i++) {
+ printk("%02X:", ioread8(card->attr_io + (i << 1)));
+ }
+ printk("\n");
+
+ /* Verify whether a supported PC card is present */
+ /* FIXME: we probably need to be smarter about this */
+ for (i = 0; i < sizeof(cis_magic); i++) {
+ if (cis_magic[i] != ioread8(card->attr_io + (i << 1))) {
+ printk(KERN_ERR PFX "The CIS value of Prism2 PC "
+ "card is unexpected\n");
+ return -ENODEV;
+ }
+ }
+
+ /* bjoern: We need to tell the card to enable interrupts, in
+ case the serial eprom didn't do this already. See the
+ PLX9052 data book, p8-1 and 8-24 for reference. */
+ csr_reg = ioread32(card->bridge_io + PLX_INTCSR);
+ if (!(csr_reg & PLX_INTCSR_INTEN)) {
+ csr_reg |= PLX_INTCSR_INTEN;
+ iowrite32(csr_reg, card->bridge_io + PLX_INTCSR);
+ csr_reg = ioread32(card->bridge_io + PLX_INTCSR);
+ if (!(csr_reg & PLX_INTCSR_INTEN)) {
+ printk(KERN_ERR PFX "Cannot enable interrupts\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
static int orinoco_plx_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int err = 0;
- u8 __iomem *attr_mem = NULL;
- u32 csr_reg, plx_addr;
- struct orinoco_private *priv = NULL;
- struct orinoco_plx_card *card;
- unsigned long pccard_ioaddr = 0;
- unsigned long pccard_iolen = 0;
- struct net_device *dev = NULL;
- void __iomem *mem;
- int i;
+ int err;
+ struct orinoco_private *priv;
+ struct orinoco_pci_card *card;
+ struct net_device *dev;
+ void __iomem *hermes_io, *attr_io, *bridge_io;
err = pci_enable_device(pdev);
if (err) {
@@ -199,30 +195,30 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
}
err = pci_request_regions(pdev, DRIVER_NAME);
- if (err != 0) {
+ if (err) {
printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
goto fail_resources;
}
- /* Resource 1 is mapped to PLX-specific registers */
- plx_addr = pci_resource_start(pdev, 1);
+ bridge_io = pci_iomap(pdev, 1, 0);
+ if (!bridge_io) {
+ printk(KERN_ERR PFX "Cannot map bridge registers\n");
+ err = -EIO;
+ goto fail_map_bridge;
+ }
- /* Resource 2 is mapped to the PCMCIA attribute memory */
- attr_mem = ioremap(pci_resource_start(pdev, 2),
- pci_resource_len(pdev, 2));
- if (!attr_mem) {
- printk(KERN_ERR PFX "Cannot remap PCMCIA space\n");
+ attr_io = pci_iomap(pdev, 2, 0);
+ if (!attr_io) {
+ printk(KERN_ERR PFX "Cannot map PCMCIA attributes\n");
+ err = -EIO;
goto fail_map_attr;
}
- /* Resource 3 is mapped to the PCMCIA I/O address space */
- pccard_ioaddr = pci_resource_start(pdev, 3);
- pccard_iolen = pci_resource_len(pdev, 3);
-
- mem = pci_iomap(pdev, 3, 0);
- if (!mem) {
- err = -ENOMEM;
- goto fail_map_io;
+ hermes_io = pci_iomap(pdev, 3, 0);
+ if (!hermes_io) {
+ printk(KERN_ERR PFX "Cannot map chipset registers\n");
+ err = -EIO;
+ goto fail_map_hermes;
}
/* Allocate network device */
@@ -235,16 +231,12 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
priv = netdev_priv(dev);
card = priv->card;
- card->attr_mem = attr_mem;
- dev->base_addr = pccard_ioaddr;
+ card->bridge_io = bridge_io;
+ card->attr_io = attr_io;
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- hermes_struct_init(&priv->hw, mem, HERMES_16BIT_REGSPACING);
-
- printk(KERN_DEBUG PFX "Detected Orinoco/Prism2 PLX device "
- "at %s irq:%d, io addr:0x%lx\n", pci_name(pdev), pdev->irq,
- pccard_ioaddr);
+ hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING);
err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
dev->name, dev);
@@ -253,20 +245,11 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
err = -EBUSY;
goto fail_irq;
}
- dev->irq = pdev->irq;
- /* bjoern: We need to tell the card to enable interrupts, in
- case the serial eprom didn't do this already. See the
- PLX9052 data book, p8-1 and 8-24 for reference. */
- csr_reg = inl(plx_addr + PLX_INTCSR);
- if (!(csr_reg & PLX_INTCSR_INTEN)) {
- csr_reg |= PLX_INTCSR_INTEN;
- outl(csr_reg, plx_addr + PLX_INTCSR);
- csr_reg = inl(plx_addr + PLX_INTCSR);
- if (!(csr_reg & PLX_INTCSR_INTEN)) {
- printk(KERN_ERR PFX "Cannot enable interrupts\n");
- goto fail;
- }
+ err = orinoco_plx_hw_init(card);
+ if (err) {
+ printk(KERN_ERR PFX "Hardware initialization failed\n");
+ goto fail;
}
err = orinoco_plx_cor_reset(priv);
@@ -275,23 +258,6 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
goto fail;
}
- printk(KERN_DEBUG PFX "CIS: ");
- for (i = 0; i < 16; i++) {
- printk("%02X:", readb(attr_mem + 2*i));
- }
- printk("\n");
-
- /* Verify whether a supported PC card is present */
- /* FIXME: we probably need to be smarted about this */
- for (i = 0; i < sizeof(cis_magic); i++) {
- if (cis_magic[i] != readb(attr_mem +2*i)) {
- printk(KERN_ERR PFX "The CIS value of Prism2 PC "
- "card is unexpected\n");
- err = -EIO;
- goto fail;
- }
- }
-
err = register_netdev(dev);
if (err) {
printk(KERN_ERR PFX "Cannot register network device\n");
@@ -299,6 +265,8 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
}
pci_set_drvdata(pdev, dev);
+ printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s\n", dev->name,
+ pci_name(pdev));
return 0;
@@ -310,12 +278,15 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
free_orinocodev(dev);
fail_alloc:
- pci_iounmap(pdev, mem);
+ pci_iounmap(pdev, hermes_io);
- fail_map_io:
- iounmap(attr_mem);
+ fail_map_hermes:
+ pci_iounmap(pdev, attr_io);
fail_map_attr:
+ pci_iounmap(pdev, bridge_io);
+
+ fail_map_bridge:
pci_release_regions(pdev);
fail_resources:
@@ -328,23 +299,20 @@ static void __devexit orinoco_plx_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct orinoco_private *priv = netdev_priv(dev);
- struct orinoco_plx_card *card = priv->card;
- u8 __iomem *attr_mem = card->attr_mem;
-
- BUG_ON(! dev);
+ struct orinoco_pci_card *card = priv->card;
unregister_netdev(dev);
- free_irq(dev->irq, dev);
+ free_irq(pdev->irq, dev);
pci_set_drvdata(pdev, NULL);
free_orinocodev(dev);
pci_iounmap(pdev, priv->hw.iobase);
- iounmap(attr_mem);
+ pci_iounmap(pdev, card->attr_io);
+ pci_iounmap(pdev, card->bridge_io);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
-
-static struct pci_device_id orinoco_plx_pci_id_table[] = {
+static struct pci_device_id orinoco_plx_id_table[] = {
{0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */
{0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */
{0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */
@@ -362,13 +330,15 @@ static struct pci_device_id orinoco_plx_pci_id_table[] = {
{0,},
};
-MODULE_DEVICE_TABLE(pci, orinoco_plx_pci_id_table);
+MODULE_DEVICE_TABLE(pci, orinoco_plx_id_table);
static struct pci_driver orinoco_plx_driver = {
.name = DRIVER_NAME,
- .id_table = orinoco_plx_pci_id_table,
+ .id_table = orinoco_plx_id_table,
.probe = orinoco_plx_init_one,
.remove = __devexit_p(orinoco_plx_remove_one),
+ .suspend = orinoco_pci_suspend,
+ .resume = orinoco_pci_resume,
};
static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
@@ -388,7 +358,6 @@ static int __init orinoco_plx_init(void)
static void __exit orinoco_plx_exit(void)
{
pci_unregister_driver(&orinoco_plx_driver);
- ssleep(1);
}
module_init(orinoco_plx_init);
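The probe rework in orinoco_nortel, orinoco_plx and orinoco_tmd shares one unwind discipline: each pci_iomap() failure jumps to a label that undoes exactly the resources acquired so far. A condensed sketch of that pattern (example_probe and the BAR numbers are illustrative, not copied from any single driver):

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	void __iomem *bridge_io, *hermes_io;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "example");
	if (err)
		goto fail_resources;

	bridge_io = pci_iomap(pdev, 1, 0);
	if (!bridge_io) {
		err = -EIO;
		goto fail_map_bridge;
	}

	hermes_io = pci_iomap(pdev, 2, 0);
	if (!hermes_io) {
		err = -EIO;
		goto fail_map_hermes;
	}

	/* ... device setup would continue here ... */
	return 0;

 fail_map_hermes:
	pci_iounmap(pdev, bridge_io);	/* undo only what succeeded */
 fail_map_bridge:
	pci_release_regions(pdev);
 fail_resources:
	pci_disable_device(pdev);
	return err;
}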
diff --git a/drivers/net/wireless/orinoco_tmd.c b/drivers/net/wireless/orinoco_tmd.c
index 5e68b7026186..d2b4decb7a7d 100644
--- a/drivers/net/wireless/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco_tmd.c
@@ -1,5 +1,5 @@
/* orinoco_tmd.c
- *
+ *
* Driver for Prism II devices which would usually be driven by orinoco_cs,
* but are connected to the PCI bus by a TMD7160.
*
@@ -26,25 +26,13 @@
* other provisions required by the GPL. If you do not delete the
* provisions above, a recipient may use your version of this file
* under either the MPL or the GPL.
-
- * Caution: this is experimental and probably buggy. For success and
- * failure reports for different cards and adaptors, see
- * orinoco_tmd_pci_id_table near the end of the file. If you have a
- * card we don't have the PCI id for, and looks like it should work,
- * drop me mail with the id and "it works"/"it doesn't work".
- *
- * Note: if everything gets detected fine but it doesn't actually send
- * or receive packets, your first port of call should probably be to
- * try newer firmware in the card. Especially if you're doing Ad-Hoc
- * modes
*
* The actual driving is done by orinoco.c, this is just resource
* allocation stuff.
*
* This driver is modeled after the orinoco_plx driver. The main
- * difference is that the TMD chip has only IO port ranges and no
- * memory space, i.e. no access to the CIS. Compared to the PLX chip,
- * the io range functionalities are exchanged.
+ * difference is that the TMD chip has only IO port ranges and doesn't
+ * provide access to the PCMCIA attribute space.
*
* Pheecom sells cards with the TMD chip as "ASIC version"
*/
@@ -61,32 +49,26 @@
#include <pcmcia/cisreg.h>
#include "orinoco.h"
+#include "orinoco_pci.h"
#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
#define COR_RESET (0x80) /* reset bit in the COR register */
#define TMD_RESET_TIME (500) /* milliseconds */
-/* Orinoco TMD specific data */
-struct orinoco_tmd_card {
- u32 tmd_io;
-};
-
-
/*
* Do a soft reset of the card using the Configuration Option Register
*/
static int orinoco_tmd_cor_reset(struct orinoco_private *priv)
{
hermes_t *hw = &priv->hw;
- struct orinoco_tmd_card *card = priv->card;
- u32 addr = card->tmd_io;
+ struct orinoco_pci_card *card = priv->card;
unsigned long timeout;
u16 reg;
- outb(COR_VALUE | COR_RESET, addr);
+ iowrite8(COR_VALUE | COR_RESET, card->bridge_io);
mdelay(1);
- outb(COR_VALUE, addr);
+ iowrite8(COR_VALUE, card->bridge_io);
mdelay(1);
/* Just in case, wait more until the card is no longer busy */
@@ -97,7 +79,7 @@ static int orinoco_tmd_cor_reset(struct orinoco_private *priv)
reg = hermes_read_regn(hw, CMD);
}
- /* Did we timeout ? */
+ /* Still busy? */
if (reg & HERMES_CMD_BUSY) {
printk(KERN_ERR PFX "Busy timeout\n");
return -ETIMEDOUT;
@@ -110,11 +92,11 @@ static int orinoco_tmd_cor_reset(struct orinoco_private *priv)
static int orinoco_tmd_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int err = 0;
- struct orinoco_private *priv = NULL;
- struct orinoco_tmd_card *card;
- struct net_device *dev = NULL;
- void __iomem *mem;
+ int err;
+ struct orinoco_private *priv;
+ struct orinoco_pci_card *card;
+ struct net_device *dev;
+ void __iomem *hermes_io, *bridge_io;
err = pci_enable_device(pdev);
if (err) {
@@ -123,20 +105,28 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
}
err = pci_request_regions(pdev, DRIVER_NAME);
- if (err != 0) {
+ if (err) {
printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
goto fail_resources;
}
- mem = pci_iomap(pdev, 2, 0);
- if (! mem) {
- err = -ENOMEM;
- goto fail_iomap;
+ bridge_io = pci_iomap(pdev, 1, 0);
+ if (!bridge_io) {
+ printk(KERN_ERR PFX "Cannot map bridge registers\n");
+ err = -EIO;
+ goto fail_map_bridge;
+ }
+
+ hermes_io = pci_iomap(pdev, 2, 0);
+ if (!hermes_io) {
+ printk(KERN_ERR PFX "Cannot map chipset registers\n");
+ err = -EIO;
+ goto fail_map_hermes;
}
/* Allocate network device */
dev = alloc_orinocodev(sizeof(*card), orinoco_tmd_cor_reset);
- if (! dev) {
+ if (!dev) {
printk(KERN_ERR PFX "Cannot allocate network device\n");
err = -ENOMEM;
goto fail_alloc;
@@ -144,16 +134,11 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
priv = netdev_priv(dev);
card = priv->card;
- card->tmd_io = pci_resource_start(pdev, 1);
- dev->base_addr = pci_resource_start(pdev, 2);
+ card->bridge_io = bridge_io;
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- hermes_struct_init(&priv->hw, mem, HERMES_16BIT_REGSPACING);
-
- printk(KERN_DEBUG PFX "Detected Orinoco/Prism2 TMD device "
- "at %s irq:%d, io addr:0x%lx\n", pci_name(pdev), pdev->irq,
- dev->base_addr);
+ hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING);
err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
dev->name, dev);
@@ -162,7 +147,6 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
err = -EBUSY;
goto fail_irq;
}
- dev->irq = pdev->irq;
err = orinoco_tmd_cor_reset(priv);
if (err) {
@@ -177,6 +161,8 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
}
pci_set_drvdata(pdev, dev);
+ printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s\n", dev->name,
+ pci_name(pdev));
return 0;
@@ -188,9 +174,12 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
free_orinocodev(dev);
fail_alloc:
- pci_iounmap(pdev, mem);
+ pci_iounmap(pdev, hermes_io);
+
+ fail_map_hermes:
+ pci_iounmap(pdev, bridge_io);
- fail_iomap:
+ fail_map_bridge:
pci_release_regions(pdev);
fail_resources:
@@ -203,31 +192,32 @@ static void __devexit orinoco_tmd_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct orinoco_private *priv = dev->priv;
-
- BUG_ON(! dev);
+ struct orinoco_pci_card *card = priv->card;
unregister_netdev(dev);
- free_irq(dev->irq, dev);
+ free_irq(pdev->irq, dev);
pci_set_drvdata(pdev, NULL);
free_orinocodev(dev);
pci_iounmap(pdev, priv->hw.iobase);
+ pci_iounmap(pdev, card->bridge_io);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
-
-static struct pci_device_id orinoco_tmd_pci_id_table[] = {
+static struct pci_device_id orinoco_tmd_id_table[] = {
{0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */
{0,},
};
-MODULE_DEVICE_TABLE(pci, orinoco_tmd_pci_id_table);
+MODULE_DEVICE_TABLE(pci, orinoco_tmd_id_table);
static struct pci_driver orinoco_tmd_driver = {
.name = DRIVER_NAME,
- .id_table = orinoco_tmd_pci_id_table,
+ .id_table = orinoco_tmd_id_table,
.probe = orinoco_tmd_init_one,
.remove = __devexit_p(orinoco_tmd_remove_one),
+ .suspend = orinoco_pci_suspend,
+ .resume = orinoco_pci_resume,
};
static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
@@ -245,7 +235,6 @@ static int __init orinoco_tmd_init(void)
static void __exit orinoco_tmd_exit(void)
{
pci_unregister_driver(&orinoco_tmd_driver);
- ssleep(1);
}
module_init(orinoco_tmd_init);
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index f7b77ce54d7b..7f9aa139c347 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -1,6 +1,6 @@
/*
* Driver for 802.11b cards using RAM-loadable Symbol firmware, such as
- * Symbol Wireless Networker LA4100, CompactFlash cards by Socket
+ * Symbol Wireless Networker LA4137, CompactFlash cards by Socket
* Communications and Intel PRO/Wireless 2011B.
*
* The driver implements Symbol firmware download. The rest is handled
@@ -120,8 +120,8 @@ static void spectrum_cs_release(struct pcmcia_device *link);
* Each block has the following structure.
*/
struct dblock {
- __le32 _addr; /* adapter address where to write the block */
- __le16 _len; /* length of the data only, in bytes */
+ __le32 addr; /* adapter address where to write the block */
+ __le16 len; /* length of the data only, in bytes */
char data[0]; /* data to be written */
} __attribute__ ((packed));
@@ -131,9 +131,9 @@ struct dblock {
* items with matching ID should be written.
*/
struct pdr {
- __le32 _id; /* record ID */
- __le32 _addr; /* adapter address where to write the data */
- __le32 _len; /* expected length of the data, in bytes */
+ __le32 id; /* record ID */
+ __le32 addr; /* adapter address where to write the data */
+ __le32 len; /* expected length of the data, in bytes */
char next[0]; /* next PDR starts here */
} __attribute__ ((packed));
@@ -144,8 +144,8 @@ struct pdr {
* be plugged into the secondary firmware.
*/
struct pdi {
- __le16 _len; /* length of ID and data, in words */
- __le16 _id; /* record ID */
+ __le16 len; /* length of ID and data, in words */
+ __le16 id; /* record ID */
char data[0]; /* plug data */
} __attribute__ ((packed));
@@ -154,44 +154,44 @@ struct pdi {
static inline u32
dblock_addr(const struct dblock *blk)
{
- return le32_to_cpu(blk->_addr);
+ return le32_to_cpu(blk->addr);
}
static inline u32
dblock_len(const struct dblock *blk)
{
- return le16_to_cpu(blk->_len);
+ return le16_to_cpu(blk->len);
}
static inline u32
pdr_id(const struct pdr *pdr)
{
- return le32_to_cpu(pdr->_id);
+ return le32_to_cpu(pdr->id);
}
static inline u32
pdr_addr(const struct pdr *pdr)
{
- return le32_to_cpu(pdr->_addr);
+ return le32_to_cpu(pdr->addr);
}
static inline u32
pdr_len(const struct pdr *pdr)
{
- return le32_to_cpu(pdr->_len);
+ return le32_to_cpu(pdr->len);
}
static inline u32
pdi_id(const struct pdi *pdi)
{
- return le16_to_cpu(pdi->_id);
+ return le16_to_cpu(pdi->id);
}
/* Return length of the data only, in bytes */
static inline u32
pdi_len(const struct pdi *pdi)
{
- return 2 * (le16_to_cpu(pdi->_len) - 1);
+ return 2 * (le16_to_cpu(pdi->len) - 1);
}
@@ -343,8 +343,7 @@ spectrum_plug_pdi(hermes_t *hw, struct pdr *first_pdr, struct pdi *pdi)
/* do the actual plugging */
spectrum_aux_setaddr(hw, pdr_addr(pdr));
- hermes_write_words(hw, HERMES_AUXDATA, pdi->data,
- pdi_len(pdi) / 2);
+ hermes_write_bytes(hw, HERMES_AUXDATA, pdi->data, pdi_len(pdi));
return 0;
}
@@ -424,8 +423,8 @@ spectrum_load_blocks(hermes_t *hw, const struct dblock *first_block)
while (dblock_addr(blk) != BLOCK_END) {
spectrum_aux_setaddr(hw, blkaddr);
- hermes_write_words(hw, HERMES_AUXDATA, blk->data,
- blklen / 2);
+ hermes_write_bytes(hw, HERMES_AUXDATA, blk->data,
+ blklen);
blk = (struct dblock *) &blk->data[blklen];
blkaddr = dblock_addr(blk);
@@ -626,14 +625,11 @@ static void spectrum_cs_detach(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ if (link->dev_node)
+ unregister_netdev(dev);
+
spectrum_cs_release(link);
- DEBUG(0, PFX "detach: link=%p link->dev_node=%p\n", link, link->dev_node);
- if (link->dev_node) {
- DEBUG(0, PFX "About to unregister net device %p\n",
- dev);
- unregister_netdev(dev);
- }
free_orinocodev(dev);
} /* spectrum_cs_detach */
@@ -653,13 +649,10 @@ spectrum_cs_config(struct pcmcia_device *link)
int last_fn, last_ret;
u_char buf[64];
config_info_t conf;
- cisinfo_t info;
tuple_t tuple;
cisparse_t parse;
void __iomem *mem;
- CS_CHECK(ValidateCIS, pcmcia_validate_cis(link, &info));
-
/*
* This reads the card's CONFIG tuple to find its
* configuration registers.
@@ -709,12 +702,6 @@ spectrum_cs_config(struct pcmcia_device *link)
goto next_entry;
link->conf.ConfigIndex = cfg->index;
- /* Does this card need audio output? */
- if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
- link->conf.Attributes |= CONF_ENABLE_SPKR;
- link->conf.Status = CCSR_AUDIO_ENA;
- }
-
/* Use power settings for Vcc and Vpp if present */
/* Note that the CIS values need to be rescaled */
if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
@@ -835,19 +822,10 @@ spectrum_cs_config(struct pcmcia_device *link)
net_device has been registered */
/* Finally, report what we've done */
- printk(KERN_DEBUG "%s: index 0x%02x: ",
- dev->name, link->conf.ConfigIndex);
- if (link->conf.Vpp)
- printk(", Vpp %d.%d", link->conf.Vpp / 10,
- link->conf.Vpp % 10);
- printk(", irq %d", link->irq.AssignedIRQ);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1 + link->io.NumPorts1 - 1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2 + link->io.NumPorts2 - 1);
- printk("\n");
+ printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io "
+ "0x%04x-0x%04x\n", dev->name, dev->class_dev.dev->bus_id,
+ link->irq.AssignedIRQ, link->io.BasePort1,
+ link->io.BasePort1 + link->io.NumPorts1 - 1);
return 0;
@@ -888,11 +866,10 @@ spectrum_cs_suspend(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
struct orinoco_private *priv = netdev_priv(dev);
- unsigned long flags;
int err = 0;
/* Mark the device as stopped, to block IO until later */
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&priv->lock);
err = __orinoco_down(dev);
if (err)
@@ -902,9 +879,9 @@ spectrum_cs_suspend(struct pcmcia_device *link)
netif_device_detach(dev);
priv->hw_unavailable++;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->lock);
- return 0;
+ return err;
}
static int
@@ -932,7 +909,7 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
" David Gibson <hermes@gibson.dropbear.id.au>, et al)";
static struct pcmcia_device_id spectrum_cs_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4100 */
+ PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4137 */
PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0001), /* Socket Communications CF */
PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless LAN PC Card", 0x816cc815, 0x6fbf459a), /* 2011B, not 2011 */
PCMCIA_DEVICE_NULL,
diff --git a/drivers/usb/net/zd1201.c b/drivers/net/wireless/zd1201.c
index 9b1e4ed1d07e..662ecc8a33ff 100644
--- a/drivers/usb/net/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -33,7 +33,7 @@ static struct usb_device_id zd1201_table[] = {
{}
};
-static int ap = 0; /* Are we an AP or a normal station? */
+static int ap; /* Are we an AP or a normal station? */
#define ZD1201_VERSION "0.15"
@@ -49,7 +49,7 @@ MODULE_DEVICE_TABLE(usb, zd1201_table);
static int zd1201_fw_upload(struct usb_device *dev, int apfw)
{
const struct firmware *fw_entry;
- char* data;
+ char *data;
unsigned long len;
int err;
unsigned char ret;
@@ -65,7 +65,7 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
if (err) {
dev_err(&dev->dev, "Failed to load %s firmware file!\n", fwfile);
dev_err(&dev->dev, "Make sure the hotplug firmware loader is installed.\n");
- dev_err(&dev->dev, "Goto http://linux-lc100020.sourceforge.net for more info\n");
+ dev_err(&dev->dev, "Goto http://linux-lc100020.sourceforge.net for more info.\n");
return err;
}
@@ -94,12 +94,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
USB_DIR_OUT | 0x40, 0, 0, NULL, 0, ZD1201_FW_TIMEOUT);
if (err < 0)
goto exit;
-
+
err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
if (err < 0)
goto exit;
-
+
if (ret & 0x80) {
err = -EIO;
goto exit;
@@ -166,13 +166,13 @@ static int zd1201_docmd(struct zd1201 *zd, int cmd, int parm0,
return -ENOMEM;
}
usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out2),
- command, 16, zd1201_usbfree, zd);
+ command, 16, zd1201_usbfree, zd);
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
kfree(command);
usb_free_urb(urb);
}
-
+
return ret;
}
@@ -316,7 +316,7 @@ static void zd1201_usbrx(struct urb *urb, struct pt_regs *regs)
fc = le16_to_cpu(*(__le16 *)&data[datalen-16]);
seq = le16_to_cpu(*(__le16 *)&data[datalen-24]);
- if(zd->monitor) {
+ if (zd->monitor) {
if (datalen < 24)
goto resubmit;
if (!(skb = dev_alloc_skb(datalen+24)))
@@ -364,7 +364,7 @@ static void zd1201_usbrx(struct urb *urb, struct pt_regs *regs)
goto resubmit;
}
hlist_for_each_entry(frag, node, &zd->fraglist, fnode)
- if(frag->seq == (seq&IEEE80211_SCTL_SEQ))
+ if (frag->seq == (seq&IEEE80211_SCTL_SEQ))
break;
if (!frag)
goto resubmit;
@@ -376,7 +376,6 @@ static void zd1201_usbrx(struct urb *urb, struct pt_regs *regs)
goto resubmit;
hlist_del_init(&frag->fnode);
kfree(frag);
- /* Fallthrough */
} else {
if (datalen<14)
goto resubmit;
@@ -422,7 +421,7 @@ static int zd1201_getconfig(struct zd1201 *zd, int rid, void *riddata,
int rid_fid;
int length;
unsigned char *pdata;
-
+
zd->rxdatas = 0;
err = zd1201_docmd(zd, ZD1201_CMDCODE_ACCESS, rid, 0, 0);
if (err)
@@ -471,11 +470,11 @@ static int zd1201_getconfig(struct zd1201 *zd, int rid, void *riddata,
length = zd->rxlen;
do {
- int actual_length;
+ int actual_length;
actual_length = (length > 64) ? 64 : length;
- if(pdata[0] != 0x3) {
+ if (pdata[0] != 0x3) {
dev_dbg(&zd->usb->dev, "Rx Resource packet type error: %02X\n",
pdata[0]);
return -EINVAL;
@@ -487,11 +486,10 @@ static int zd1201_getconfig(struct zd1201 *zd, int rid, void *riddata,
}
/* Skip the 4 bytes header (RID length and RID) */
- if(i == 0) {
+ if (i == 0) {
pdata += 8;
actual_length -= 8;
- }
- else {
+ } else {
pdata += 4;
actual_length -= 4;
}
@@ -620,7 +618,7 @@ static int zd1201_drvr_start(struct zd1201 *zd)
short max;
__le16 zdmax;
unsigned char *buffer;
-
+
buffer = kzalloc(ZD1201_RXSIZE, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -632,7 +630,7 @@ static int zd1201_drvr_start(struct zd1201 *zd)
err = usb_submit_urb(zd->rx_urb, GFP_KERNEL);
if (err)
goto err_buffer;
-
+
err = zd1201_docmd(zd, ZD1201_CMDCODE_INIT, 0, 0, 0);
if (err)
goto err_urb;
@@ -684,7 +682,7 @@ static int zd1201_enable(struct zd1201 *zd)
static int zd1201_disable(struct zd1201 *zd)
{
int err;
-
+
if (!zd->mac_enabled)
return 0;
if (zd->monitor) {
@@ -764,7 +762,6 @@ static int zd1201_net_open(struct net_device *dev)
static int zd1201_net_stop(struct net_device *dev)
{
netif_stop_queue(dev);
-
return 0;
}
@@ -915,7 +912,6 @@ static int zd1201_get_name(struct net_device *dev,
struct iw_request_info *info, char *name, char *extra)
{
strcpy(name, "IEEE 802.11b");
-
return 0;
}
@@ -1013,11 +1009,10 @@ static int zd1201_set_mode(struct net_device *dev,
if (err)
return err;
}
- zd->monitor=monitor;
+ zd->monitor = monitor;
/* If monitor mode is set we don't actually turn it on here since it
* is done during mac reset anyway (see zd1201_mac_enable).
*/
-
zd1201_mac_reset(zd);
return 0;
@@ -1117,7 +1112,7 @@ static int zd1201_get_wap(struct net_device *dev,
zd->iwstats.qual.updated = 2;
}
- return zd1201_getconfig(zd,ZD1201_RID_CURRENTBSSID,ap_addr->sa_data,6);
+ return zd1201_getconfig(zd, ZD1201_RID_CURRENTBSSID, ap_addr->sa_data, 6);
}
static int zd1201_set_scan(struct net_device *dev,
@@ -1275,7 +1270,7 @@ static int zd1201_set_rate(struct net_device *dev,
if (!rrq->fixed) { /* Also enable all lower bitrates */
rate |= rate-1;
}
-
+
err = zd1201_setconfig16(zd, ZD1201_RID_TXRATECNTL, rate);
if (err)
return err;
@@ -1486,7 +1481,7 @@ static int zd1201_get_encode(struct net_device *dev,
return -EINVAL;
erq->flags |= i+1;
-
+
erq->length = zd->encode_keylen[i];
memcpy(key, zd->encode_keys[i], erq->length);
@@ -1529,11 +1524,7 @@ static int zd1201_set_power(struct net_device *dev,
return -EINVAL;
}
out:
- err = zd1201_setconfig16(zd, ZD1201_RID_CNFPMENABLED, enabled);
- if (err)
- return err;
-
- return 0;
+ return zd1201_setconfig16(zd, ZD1201_RID_CNFPMENABLED, enabled);
}
static int zd1201_get_power(struct net_device *dev,
@@ -1627,15 +1618,11 @@ static int zd1201_set_hostauth(struct net_device *dev,
struct iw_request_info *info, struct iw_param *rrq, char *extra)
{
struct zd1201 *zd = (struct zd1201 *)dev->priv;
- int err;
if (!zd->ap)
return -EOPNOTSUPP;
- err = zd1201_setconfig16(zd, ZD1201_RID_CNFHOSTAUTH, rrq->value);
- if (err)
- return err;
- return 0;
+ return zd1201_setconfig16(zd, ZD1201_RID_CNFHOSTAUTH, rrq->value);
}
static int zd1201_get_hostauth(struct net_device *dev,
@@ -1744,7 +1731,7 @@ static int zd1201_probe(struct usb_interface *interface,
{
struct zd1201 *zd;
struct usb_device *usb;
- int i, err;
+ int err;
short porttype;
char buf[IW_ESSID_MAX_SIZE+2];
@@ -1773,9 +1760,7 @@ static int zd1201_probe(struct usb_interface *interface,
if (!zd->rx_urb || !zd->tx_urb)
goto err_zd;
- for(i = 0; i<100; i++)
- udelay(1000);
-
+ mdelay(100);
err = zd1201_drvr_start(zd);
if (err)
goto err_zd;
@@ -1833,7 +1818,7 @@ static int zd1201_probe(struct usb_interface *interface,
goto err_net;
dev_info(&usb->dev, "%s: ZD1201 USB Wireless interface\n",
zd->dev->name);
-
+
usb_set_intfdata(interface, zd);
return 0;
diff --git a/drivers/usb/net/zd1201.h b/drivers/net/wireless/zd1201.h
index 235f0ee34b24..235f0ee34b24 100644
--- a/drivers/usb/net/zd1201.h
+++ b/drivers/net/wireless/zd1201.h
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 1456759936c5..10e1a905c144 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -285,9 +285,9 @@ static int pci_device_suspend(struct device * dev, pm_message_t state)
* Default resume method for devices that have no driver provided resume,
* or not even a driver at all.
*/
-static void pci_default_resume(struct pci_dev *pci_dev)
+static int pci_default_resume(struct pci_dev *pci_dev)
{
- int retval;
+ int retval = 0;
/* restore the PCI config space */
pci_restore_state(pci_dev);
@@ -297,18 +297,21 @@ static void pci_default_resume(struct pci_dev *pci_dev)
/* if the device was busmaster before the suspend, make it busmaster again */
if (pci_dev->is_busmaster)
pci_set_master(pci_dev);
+
+ return retval;
}
static int pci_device_resume(struct device * dev)
{
+ int error;
struct pci_dev * pci_dev = to_pci_dev(dev);
struct pci_driver * drv = pci_dev->driver;
if (drv && drv->resume)
- drv->resume(pci_dev);
+ error = drv->resume(pci_dev);
else
- pci_default_resume(pci_dev);
- return 0;
+ error = pci_default_resume(pci_dev);
+ return error;
}
static void pci_device_shutdown(struct device *dev)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 2329f941a0dc..fde41cc14734 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -164,7 +164,6 @@ int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
return __pci_bus_find_cap(bus, devfn, hdr_type & 0x7f, cap);
}
-#if 0
/**
* pci_find_ext_capability - Find an extended capability
* @dev: PCI device to query
@@ -212,7 +211,7 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap)
return 0;
}
-#endif /* 0 */
+EXPORT_SYMBOL_GPL(pci_find_ext_capability);
/**
* pci_find_parent_resource - return resource region of parent bus of given region
@@ -461,9 +460,23 @@ int
pci_restore_state(struct pci_dev *dev)
{
int i;
+ u32 val;
- for (i = 0; i < 16; i++)
- pci_write_config_dword(dev,i * 4, dev->saved_config_space[i]);
+ /*
+ * The Base Address register should be programmed before the command
+ * register(s)
+ */
+ for (i = 15; i >= 0; i--) {
+ pci_read_config_dword(dev, i * 4, &val);
+ if (val != dev->saved_config_space[i]) {
+ printk(KERN_DEBUG "PM: Writing back config space on "
+ "device %s at offset %x (was %x, writing %x)\n",
+ pci_name(dev), i,
+ val, (int)dev->saved_config_space[i]);
+ pci_write_config_dword(dev,i * 4,
+ dev->saved_config_space[i]);
+ }
+ }
pci_restore_msi_state(dev);
pci_restore_msix_state(dev);
return 0;
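
A quick standalone sketch of the restore-order idea in the pci_restore_state() hunk above: walking the 16 saved dwords from index 15 down to 0 writes the BARs (dwords 4-9) back before the command register (dword 1), so memory and I/O decoding are only re-enabled once the BARs already hold their saved values. Everything below is illustrative user-space C, not kernel code; fake_write_config() stands in for pci_write_config_dword() and the register values are made up.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for pci_write_config_dword(): just report what would be written. */
static void fake_write_config(int dword_index, uint32_t value)
{
	printf("write dword %2d (offset 0x%02x) = 0x%08x\n",
	       dword_index, dword_index * 4, value);
}

int main(void)
{
	uint32_t current[16] = { 0 };	/* state read back from the device */
	uint32_t saved[16]   = { 0 };	/* snapshot taken at suspend time */
	int i;

	saved[1] = 0x00000007;		/* command: I/O, memory, bus master */
	saved[4] = 0xfebf0000;		/* BAR0 */

	/* Reverse order: BARs get restored before the command register. */
	for (i = 15; i >= 0; i--)
		if (current[i] != saved[i])
			fake_write_config(i, saved[i]);
	return 0;
}

Compiling and running this prints the writes in descending offset order, with the command register (offset 0x04) written last.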
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 48d3b3d30c21..74b3124e8247 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -1143,6 +1143,12 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
{
struct pcmcia_socket *s = pcmcia_get_socket(skt);
+ if (!s) {
+ printk(KERN_ERR "PCMCIA obtaining reference to socket %p " \
+ "failed, event 0x%x lost!\n", skt, event);
+ return -ENODEV;
+ }
+
ds_dbg(1, "ds_event(0x%06x, %d, 0x%p)\n",
event, priority, skt);
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index f6e7ee04f3dc..8c0d1a6739ad 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -48,33 +48,33 @@ static int m48t86_rtc_read_time(struct device *dev, struct rtc_time *tm)
struct platform_device *pdev = to_platform_device(dev);
struct m48t86_ops *ops = pdev->dev.platform_data;
- reg = ops->readb(M48T86_REG_B);
+ reg = ops->readbyte(M48T86_REG_B);
if (reg & M48T86_REG_B_DM) {
/* data (binary) mode */
- tm->tm_sec = ops->readb(M48T86_REG_SEC);
- tm->tm_min = ops->readb(M48T86_REG_MIN);
- tm->tm_hour = ops->readb(M48T86_REG_HOUR) & 0x3F;
- tm->tm_mday = ops->readb(M48T86_REG_DOM);
+ tm->tm_sec = ops->readbyte(M48T86_REG_SEC);
+ tm->tm_min = ops->readbyte(M48T86_REG_MIN);
+ tm->tm_hour = ops->readbyte(M48T86_REG_HOUR) & 0x3F;
+ tm->tm_mday = ops->readbyte(M48T86_REG_DOM);
/* tm_mon is 0-11 */
- tm->tm_mon = ops->readb(M48T86_REG_MONTH) - 1;
- tm->tm_year = ops->readb(M48T86_REG_YEAR) + 100;
- tm->tm_wday = ops->readb(M48T86_REG_DOW);
+ tm->tm_mon = ops->readbyte(M48T86_REG_MONTH) - 1;
+ tm->tm_year = ops->readbyte(M48T86_REG_YEAR) + 100;
+ tm->tm_wday = ops->readbyte(M48T86_REG_DOW);
} else {
/* bcd mode */
- tm->tm_sec = BCD2BIN(ops->readb(M48T86_REG_SEC));
- tm->tm_min = BCD2BIN(ops->readb(M48T86_REG_MIN));
- tm->tm_hour = BCD2BIN(ops->readb(M48T86_REG_HOUR) & 0x3F);
- tm->tm_mday = BCD2BIN(ops->readb(M48T86_REG_DOM));
+ tm->tm_sec = BCD2BIN(ops->readbyte(M48T86_REG_SEC));
+ tm->tm_min = BCD2BIN(ops->readbyte(M48T86_REG_MIN));
+ tm->tm_hour = BCD2BIN(ops->readbyte(M48T86_REG_HOUR) & 0x3F);
+ tm->tm_mday = BCD2BIN(ops->readbyte(M48T86_REG_DOM));
/* tm_mon is 0-11 */
- tm->tm_mon = BCD2BIN(ops->readb(M48T86_REG_MONTH)) - 1;
- tm->tm_year = BCD2BIN(ops->readb(M48T86_REG_YEAR)) + 100;
- tm->tm_wday = BCD2BIN(ops->readb(M48T86_REG_DOW));
+ tm->tm_mon = BCD2BIN(ops->readbyte(M48T86_REG_MONTH)) - 1;
+ tm->tm_year = BCD2BIN(ops->readbyte(M48T86_REG_YEAR)) + 100;
+ tm->tm_wday = BCD2BIN(ops->readbyte(M48T86_REG_DOW));
}
/* correct the hour if the clock is in 12h mode */
if (!(reg & M48T86_REG_B_H24))
- if (ops->readb(M48T86_REG_HOUR) & 0x80)
+ if (ops->readbyte(M48T86_REG_HOUR) & 0x80)
tm->tm_hour += 12;
return 0;
@@ -86,35 +86,35 @@ static int m48t86_rtc_set_time(struct device *dev, struct rtc_time *tm)
struct platform_device *pdev = to_platform_device(dev);
struct m48t86_ops *ops = pdev->dev.platform_data;
- reg = ops->readb(M48T86_REG_B);
+ reg = ops->readbyte(M48T86_REG_B);
/* update flag and 24h mode */
reg |= M48T86_REG_B_SET | M48T86_REG_B_H24;
- ops->writeb(reg, M48T86_REG_B);
+ ops->writebyte(reg, M48T86_REG_B);
if (reg & M48T86_REG_B_DM) {
/* data (binary) mode */
- ops->writeb(tm->tm_sec, M48T86_REG_SEC);
- ops->writeb(tm->tm_min, M48T86_REG_MIN);
- ops->writeb(tm->tm_hour, M48T86_REG_HOUR);
- ops->writeb(tm->tm_mday, M48T86_REG_DOM);
- ops->writeb(tm->tm_mon + 1, M48T86_REG_MONTH);
- ops->writeb(tm->tm_year % 100, M48T86_REG_YEAR);
- ops->writeb(tm->tm_wday, M48T86_REG_DOW);
+ ops->writebyte(tm->tm_sec, M48T86_REG_SEC);
+ ops->writebyte(tm->tm_min, M48T86_REG_MIN);
+ ops->writebyte(tm->tm_hour, M48T86_REG_HOUR);
+ ops->writebyte(tm->tm_mday, M48T86_REG_DOM);
+ ops->writebyte(tm->tm_mon + 1, M48T86_REG_MONTH);
+ ops->writebyte(tm->tm_year % 100, M48T86_REG_YEAR);
+ ops->writebyte(tm->tm_wday, M48T86_REG_DOW);
} else {
/* bcd mode */
- ops->writeb(BIN2BCD(tm->tm_sec), M48T86_REG_SEC);
- ops->writeb(BIN2BCD(tm->tm_min), M48T86_REG_MIN);
- ops->writeb(BIN2BCD(tm->tm_hour), M48T86_REG_HOUR);
- ops->writeb(BIN2BCD(tm->tm_mday), M48T86_REG_DOM);
- ops->writeb(BIN2BCD(tm->tm_mon + 1), M48T86_REG_MONTH);
- ops->writeb(BIN2BCD(tm->tm_year % 100), M48T86_REG_YEAR);
- ops->writeb(BIN2BCD(tm->tm_wday), M48T86_REG_DOW);
+ ops->writebyte(BIN2BCD(tm->tm_sec), M48T86_REG_SEC);
+ ops->writebyte(BIN2BCD(tm->tm_min), M48T86_REG_MIN);
+ ops->writebyte(BIN2BCD(tm->tm_hour), M48T86_REG_HOUR);
+ ops->writebyte(BIN2BCD(tm->tm_mday), M48T86_REG_DOM);
+ ops->writebyte(BIN2BCD(tm->tm_mon + 1), M48T86_REG_MONTH);
+ ops->writebyte(BIN2BCD(tm->tm_year % 100), M48T86_REG_YEAR);
+ ops->writebyte(BIN2BCD(tm->tm_wday), M48T86_REG_DOW);
}
/* update ended */
reg &= ~M48T86_REG_B_SET;
- ops->writeb(reg, M48T86_REG_B);
+ ops->writebyte(reg, M48T86_REG_B);
return 0;
}
@@ -125,12 +125,12 @@ static int m48t86_rtc_proc(struct device *dev, struct seq_file *seq)
struct platform_device *pdev = to_platform_device(dev);
struct m48t86_ops *ops = pdev->dev.platform_data;
- reg = ops->readb(M48T86_REG_B);
+ reg = ops->readbyte(M48T86_REG_B);
seq_printf(seq, "mode\t\t: %s\n",
(reg & M48T86_REG_B_DM) ? "binary" : "bcd");
- reg = ops->readb(M48T86_REG_D);
+ reg = ops->readbyte(M48T86_REG_D);
seq_printf(seq, "battery\t\t: %s\n",
(reg & M48T86_REG_D_VRT) ? "ok" : "exhausted");
@@ -157,7 +157,7 @@ static int __devinit m48t86_rtc_probe(struct platform_device *dev)
platform_set_drvdata(dev, rtc);
/* read battery status */
- reg = ops->readb(M48T86_REG_D);
+ reg = ops->readbyte(M48T86_REG_D);
dev_info(&dev->dev, "battery %s\n",
(reg & M48T86_REG_D_VRT) ? "ok" : "exhausted");
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 74a257b23383..e210f89a2449 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -45,11 +45,11 @@ struct pgid {
union {
__u8 fc; /* SPID function code */
struct path_state ps; /* SNID path state */
- } inf;
+ } __attribute__ ((packed)) inf;
union {
__u32 cpu_addr : 16; /* CPU address */
struct extended_cssid ext_cssid;
- } pgid_high;
+ } __attribute__ ((packed)) pgid_high;
__u32 cpu_id : 24; /* CPU identification */
__u32 cpu_model : 16; /* CPU model */
__u32 tod_high; /* high word TOD clock */
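
The css.h hunk above marks the unions nested in struct pgid as packed. With GCC, a packed attribute on the enclosing struct does not propagate into named inner aggregates, and a 16-bit bitfield declared with a 32-bit base type normally rounds its union up to 4 bytes, which would break a layout that has to match the hardware-defined PGID format. A standalone sketch of the size difference, using made-up types rather than the s390 definitions:

#include <stdio.h>
#include <stdint.h>

/* 16-bit bitfield in a 32-bit storage unit: typically 4 bytes unless packed. */
union high_plain {
	uint32_t cpu_addr : 16;
	uint16_t ext;
};

union high_packed {
	uint32_t cpu_addr : 16;
	uint16_t ext;
} __attribute__ ((packed));

int main(void)
{
	printf("plain union:  %zu bytes\n", sizeof(union high_plain));
	printf("packed union: %zu bytes\n", sizeof(union high_packed));
	return 0;
}

On common ABIs this prints 4 bytes for the plain union and 2 for the packed one, which is why the nested unions need their own attribute.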
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 180b3bf8b90d..49ec562d7f60 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -749,7 +749,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
/* Unit check but no sense data. Need basic sense. */
if (ccw_device_do_sense(cdev, irb) != 0)
goto call_handler_unsol;
- memcpy(irb, &cdev->private->irb, sizeof(struct irb));
+ memcpy(&cdev->private->irb, irb, sizeof(struct irb));
cdev->private->state = DEV_STATE_W4SENSE;
cdev->private->intparm = 0;
return;
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 90d4d0ef3dd4..6775a837d646 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -2,7 +2,7 @@
# S/390 network devices
#
-ctc-objs := ctcmain.o ctctty.o ctcdbug.o
+ctc-objs := ctcmain.o ctcdbug.o
obj-$(CONFIG_IUCV) += iucv.o
obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
@@ -10,6 +10,7 @@ obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
obj-$(CONFIG_LCS) += lcs.o cu3088.o
obj-$(CONFIG_CLAW) += claw.o cu3088.o
+obj-$(CONFIG_MPC) += ctcmpc.o fsm.o cu3088.o
qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o
qeth-$(CONFIG_PROC_FS) += qeth_proc.o
obj-$(CONFIG_QETH) += qeth.o
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index fe986af884f8..20c8eb16f464 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -6,7 +6,7 @@
* Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Peter Tiedemann (ptiedem@de.ibm.com)
- * Driver Model stuff by : Cornelia Huck <huckc@de.ibm.com>
+ * Driver Model stuff by : Cornelia Huck <cornelia.huck@de.ibm.com>
*
* Documentation used:
* - Principles of Operation (IBM doc#: SA22-7201-06)
@@ -65,7 +65,6 @@
#include <asm/idals.h>
-#include "ctctty.h"
#include "fsm.h"
#include "cu3088.h"
@@ -479,10 +478,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
skb->dev = pskb->dev;
skb->protocol = pskb->protocol;
pskb->ip_summed = CHECKSUM_UNNECESSARY;
- if (ch->protocol == CTC_PROTO_LINUX_TTY)
- ctc_tty_netif_rx(skb);
- else
- netif_rx_ni(skb);
+ netif_rx_ni(skb);
/**
* Successful rx; reset logflags
*/
@@ -557,8 +553,7 @@ ccw_unit_check(struct channel *ch, unsigned char sense)
DBF_TEXT(trace, 5, __FUNCTION__);
if (sense & SNS0_INTERVENTION_REQ) {
if (sense & 0x01) {
- if (ch->protocol != CTC_PROTO_LINUX_TTY)
- ctc_pr_debug("%s: Interface disc. or Sel. reset "
+ ctc_pr_debug("%s: Interface disc. or Sel. reset "
"(remote)\n", ch->id);
fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
} else {
@@ -2034,7 +2029,6 @@ static void
dev_action_chup(fsm_instance * fi, int event, void *arg)
{
struct net_device *dev = (struct net_device *) arg;
- struct ctc_priv *privptr = dev->priv;
DBF_TEXT(trace, 3, __FUNCTION__);
switch (fsm_getstate(fi)) {
@@ -2049,8 +2043,6 @@ dev_action_chup(fsm_instance * fi, int event, void *arg)
fsm_newstate(fi, DEV_STATE_RUNNING);
ctc_pr_info("%s: connected with remote side\n",
dev->name);
- if (privptr->protocol == CTC_PROTO_LINUX_TTY)
- ctc_tty_setcarrier(dev, 1);
ctc_clear_busy(dev);
}
break;
@@ -2059,8 +2051,6 @@ dev_action_chup(fsm_instance * fi, int event, void *arg)
fsm_newstate(fi, DEV_STATE_RUNNING);
ctc_pr_info("%s: connected with remote side\n",
dev->name);
- if (privptr->protocol == CTC_PROTO_LINUX_TTY)
- ctc_tty_setcarrier(dev, 1);
ctc_clear_busy(dev);
}
break;
@@ -2086,14 +2076,10 @@ dev_action_chup(fsm_instance * fi, int event, void *arg)
static void
dev_action_chdown(fsm_instance * fi, int event, void *arg)
{
- struct net_device *dev = (struct net_device *) arg;
- struct ctc_priv *privptr = dev->priv;
DBF_TEXT(trace, 3, __FUNCTION__);
switch (fsm_getstate(fi)) {
case DEV_STATE_RUNNING:
- if (privptr->protocol == CTC_PROTO_LINUX_TTY)
- ctc_tty_setcarrier(dev, 0);
if (event == DEV_EVENT_TXDOWN)
fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
else
@@ -2397,8 +2383,6 @@ ctc_tx(struct sk_buff *skb, struct net_device * dev)
*/
if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
fsm_event(privptr->fsm, DEV_EVENT_START, dev);
- if (privptr->protocol == CTC_PROTO_LINUX_TTY)
- return -EBUSY;
dev_kfree_skb(skb);
privptr->stats.tx_dropped++;
privptr->stats.tx_errors++;
@@ -2608,20 +2592,13 @@ ctc_netdev_unregister(struct net_device * dev)
if (!dev)
return;
privptr = (struct ctc_priv *) dev->priv;
- if (privptr->protocol != CTC_PROTO_LINUX_TTY)
- unregister_netdev(dev);
- else
- ctc_tty_unregister_netdev(dev);
+ unregister_netdev(dev);
}
static int
ctc_netdev_register(struct net_device * dev)
{
- struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
- if (privptr->protocol != CTC_PROTO_LINUX_TTY)
- return register_netdev(dev);
- else
- return ctc_tty_register_netdev(dev);
+ return register_netdev(dev);
}
static void
@@ -2667,7 +2644,9 @@ ctc_proto_store(struct device *dev, struct device_attribute *attr, const char *b
if (!priv)
return -ENODEV;
sscanf(buf, "%u", &value);
- if ((value < 0) || (value > CTC_PROTO_MAX))
+ if (!((value == CTC_PROTO_S390) ||
+ (value == CTC_PROTO_LINUX) ||
+ (value == CTC_PROTO_OS390)))
return -EINVAL;
priv->protocol = value;
@@ -2897,10 +2876,7 @@ ctc_new_device(struct ccwgroup_device *cgdev)
goto out;
}
- if (privptr->protocol == CTC_PROTO_LINUX_TTY)
- strlcpy(dev->name, "ctctty%d", IFNAMSIZ);
- else
- strlcpy(dev->name, "ctc%d", IFNAMSIZ);
+ strlcpy(dev->name, "ctc%d", IFNAMSIZ);
for (direction = READ; direction <= WRITE; direction++) {
privptr->channel[direction] =
@@ -3046,7 +3022,6 @@ ctc_exit(void)
{
DBF_TEXT(setup, 3, __FUNCTION__);
unregister_cu3088_discipline(&ctc_group_driver);
- ctc_tty_cleanup();
ctc_unregister_dbf_views();
ctc_pr_info("CTC driver unloaded\n");
}
@@ -3073,10 +3048,8 @@ ctc_init(void)
ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
return ret;
}
- ctc_tty_init();
ret = register_cu3088_discipline(&ctc_group_driver);
if (ret) {
- ctc_tty_cleanup();
ctc_unregister_dbf_views();
}
return ret;
diff --git a/drivers/s390/net/ctcmain.h b/drivers/s390/net/ctcmain.h
index d2e835c0c134..7f305d119f3d 100644
--- a/drivers/s390/net/ctcmain.h
+++ b/drivers/s390/net/ctcmain.h
@@ -35,7 +35,9 @@
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
-#include "ctctty.h"
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
#include "fsm.h"
#include "cu3088.h"
@@ -50,9 +52,7 @@
#define CTC_PROTO_S390 0
#define CTC_PROTO_LINUX 1
-#define CTC_PROTO_LINUX_TTY 2
#define CTC_PROTO_OS390 3
-#define CTC_PROTO_MAX 3
#define CTC_BUFSIZE_LIMIT 65535
#define CTC_BUFSIZE_DEFAULT 32768
@@ -257,15 +257,13 @@ static __inline__ void
ctc_clear_busy(struct net_device * dev)
{
clear_bit(0, &(((struct ctc_priv *) dev->priv)->tbusy));
- if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
- netif_wake_queue(dev);
+ netif_wake_queue(dev);
}
static __inline__ int
ctc_test_and_set_busy(struct net_device * dev)
{
- if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
- netif_stop_queue(dev);
+ netif_stop_queue(dev);
return test_and_set_bit(0, &((struct ctc_priv *) dev->priv)->tbusy);
}
diff --git a/drivers/s390/net/ctctty.c b/drivers/s390/net/ctctty.c
deleted file mode 100644
index af54d1de07bf..000000000000
--- a/drivers/s390/net/ctctty.c
+++ /dev/null
@@ -1,1259 +0,0 @@
-/*
- * CTC / ESCON network driver, tty interface.
- *
- * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_reg.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <asm/uaccess.h>
-#include <linux/devfs_fs_kernel.h>
-#include "ctctty.h"
-#include "ctcdbug.h"
-
-#define CTC_TTY_MAJOR 43
-#define CTC_TTY_MAX_DEVICES 64
-
-#define CTC_ASYNC_MAGIC 0x49344C01 /* for paranoia-checking */
-#define CTC_ASYNC_INITIALIZED 0x80000000 /* port was initialized */
-#define CTC_ASYNC_NORMAL_ACTIVE 0x20000000 /* Normal device active */
-#define CTC_ASYNC_CLOSING 0x08000000 /* Serial port is closing */
-#define CTC_ASYNC_CTS_FLOW 0x04000000 /* Do CTS flow control */
-#define CTC_ASYNC_CHECK_CD 0x02000000 /* i.e., CLOCAL */
-#define CTC_ASYNC_HUP_NOTIFY 0x0001 /* Notify tty on hangups/closes */
-#define CTC_ASYNC_NETDEV_OPEN 0x0002 /* Underlying netdev is open */
-#define CTC_ASYNC_TX_LINESTAT 0x0004 /* Must send line status */
-#define CTC_ASYNC_SPLIT_TERMIOS 0x0008 /* Sep. termios for dialin/out */
-#define CTC_TTY_XMIT_SIZE 1024 /* Default bufsize for write */
-#define CTC_SERIAL_XMIT_MAX 4000 /* Maximum bufsize for write */
-
-/* Private data (similar to async_struct in <linux/serial.h>) */
-typedef struct {
- int magic;
- int flags; /* defined in tty.h */
- int mcr; /* Modem control register */
- int msr; /* Modem status register */
- int lsr; /* Line status register */
- int line;
- int count; /* # of fd on device */
- int blocked_open; /* # of blocked opens */
- struct net_device *netdev;
- struct sk_buff_head tx_queue; /* transmit queue */
- struct sk_buff_head rx_queue; /* receive queue */
- struct tty_struct *tty; /* Pointer to corresponding tty */
- wait_queue_head_t open_wait;
- wait_queue_head_t close_wait;
- struct semaphore write_sem;
- struct tasklet_struct tasklet;
- struct timer_list stoptimer;
-} ctc_tty_info;
-
-/* Description of one CTC-tty */
-typedef struct {
- struct tty_driver *ctc_tty_device; /* tty-device */
- ctc_tty_info info[CTC_TTY_MAX_DEVICES]; /* Private data */
-} ctc_tty_driver;
-
-static ctc_tty_driver *driver;
-
-/* Leave this unchanged unless you know what you do! */
-#define MODEM_PARANOIA_CHECK
-#define MODEM_DO_RESTART
-
-#define CTC_TTY_NAME "ctctty"
-
-static __u32 ctc_tty_magic = CTC_ASYNC_MAGIC;
-static int ctc_tty_shuttingdown = 0;
-
-static spinlock_t ctc_tty_lock;
-
-/* ctc_tty_try_read() is called from within ctc_tty_rcv_skb()
- * to stuff incoming data directly into a tty's flip-buffer. If the
- * flip buffer is full, the packet gets queued up.
- *
- * Return:
- * 1 = Success
- * 0 = Failure, data has to be buffered and later processed by
- * ctc_tty_readmodem().
- */
-static int
-ctc_tty_try_read(ctc_tty_info * info, struct sk_buff *skb)
-{
- int len;
- struct tty_struct *tty;
-
- DBF_TEXT(trace, 5, __FUNCTION__);
- if ((tty = info->tty)) {
- if (info->mcr & UART_MCR_RTS) {
- len = skb->len;
- tty_insert_flip_string(tty, skb->data, len);
- tty_flip_buffer_push(tty);
- kfree_skb(skb);
- return 1;
- }
- }
- return 0;
-}
-
-/* ctc_tty_readmodem() is called periodically from within timer-interrupt.
- * It tries getting received data from the receive queue an stuff it into
- * the tty's flip-buffer.
- */
-static int
-ctc_tty_readmodem(ctc_tty_info *info)
-{
- int ret = 1;
- struct tty_struct *tty;
-
- DBF_TEXT(trace, 5, __FUNCTION__);
- if ((tty = info->tty)) {
- if (info->mcr & UART_MCR_RTS) {
- struct sk_buff *skb;
-
- if ((skb = skb_dequeue(&info->rx_queue))) {
- int len = skb->len;
- tty_insert_flip_string(tty, skb->data, len);
- skb_pull(skb, len);
- tty_flip_buffer_push(tty);
- if (skb->len > 0)
- skb_queue_head(&info->rx_queue, skb);
- else {
- kfree_skb(skb);
- ret = !skb_queue_empty(&info->rx_queue);
- }
- }
- }
- }
- return ret;
-}
-
-void
-ctc_tty_setcarrier(struct net_device *netdev, int on)
-{
- int i;
-
- DBF_TEXT(trace, 4, __FUNCTION__);
- if ((!driver) || ctc_tty_shuttingdown)
- return;
- for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
- if (driver->info[i].netdev == netdev) {
- ctc_tty_info *info = &driver->info[i];
- if (on)
- info->msr |= UART_MSR_DCD;
- else
- info->msr &= ~UART_MSR_DCD;
- if ((info->flags & CTC_ASYNC_CHECK_CD) && (!on))
- tty_hangup(info->tty);
- }
-}
-
-void
-ctc_tty_netif_rx(struct sk_buff *skb)
-{
- int i;
- ctc_tty_info *info = NULL;
-
- DBF_TEXT(trace, 5, __FUNCTION__);
- if (!skb)
- return;
- if ((!skb->dev) || (!driver) || ctc_tty_shuttingdown) {
- dev_kfree_skb(skb);
- return;
- }
- for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
- if (driver->info[i].netdev == skb->dev) {
- info = &driver->info[i];
- break;
- }
- if (!info) {
- dev_kfree_skb(skb);
- return;
- }
- if (skb->len < 6) {
- dev_kfree_skb(skb);
- return;
- }
- if (memcmp(skb->data, &ctc_tty_magic, sizeof(__u32))) {
- dev_kfree_skb(skb);
- return;
- }
- skb_pull(skb, sizeof(__u32));
-
- i = *((int *)skb->data);
- skb_pull(skb, sizeof(info->mcr));
- if (i & UART_MCR_RTS) {
- info->msr |= UART_MSR_CTS;
- if (info->flags & CTC_ASYNC_CTS_FLOW)
- info->tty->hw_stopped = 0;
- } else {
- info->msr &= ~UART_MSR_CTS;
- if (info->flags & CTC_ASYNC_CTS_FLOW)
- info->tty->hw_stopped = 1;
- }
- if (i & UART_MCR_DTR)
- info->msr |= UART_MSR_DSR;
- else
- info->msr &= ~UART_MSR_DSR;
- if (skb->len <= 0) {
- kfree_skb(skb);
- return;
- }
- /* Try to deliver directly via tty-flip-buf if queue is empty */
- if (skb_queue_empty(&info->rx_queue))
- if (ctc_tty_try_read(info, skb))
- return;
- /* Direct deliver failed or queue wasn't empty.
- * Queue up for later dequeueing via timer-irq.
- */
- skb_queue_tail(&info->rx_queue, skb);
- /* Schedule dequeuing */
- tasklet_schedule(&info->tasklet);
-}
-
-static int
-ctc_tty_tint(ctc_tty_info * info)
-{
- struct sk_buff *skb = skb_dequeue(&info->tx_queue);
- int stopped = (info->tty->hw_stopped || info->tty->stopped);
- int wake = 1;
- int rc;
-
- DBF_TEXT(trace, 4, __FUNCTION__);
- if (!info->netdev) {
- if (skb)
- kfree_skb(skb);
- return 0;
- }
- if (info->flags & CTC_ASYNC_TX_LINESTAT) {
- int skb_res = info->netdev->hard_header_len +
- sizeof(info->mcr) + sizeof(__u32);
- /* If we must update line status,
- * create an empty dummy skb and insert it.
- */
- if (skb)
- skb_queue_head(&info->tx_queue, skb);
-
- skb = dev_alloc_skb(skb_res);
- if (!skb) {
- printk(KERN_WARNING
- "ctc_tty: Out of memory in %s%d tint\n",
- CTC_TTY_NAME, info->line);
- return 1;
- }
- skb_reserve(skb, skb_res);
- stopped = 0;
- wake = 0;
- }
- if (!skb)
- return 0;
- if (stopped) {
- skb_queue_head(&info->tx_queue, skb);
- return 1;
- }
-#if 0
- if (skb->len > 0)
- printk(KERN_DEBUG "tint: %d %02x\n", skb->len, *(skb->data));
- else
- printk(KERN_DEBUG "tint: %d STAT\n", skb->len);
-#endif
- memcpy(skb_push(skb, sizeof(info->mcr)), &info->mcr, sizeof(info->mcr));
- memcpy(skb_push(skb, sizeof(__u32)), &ctc_tty_magic, sizeof(__u32));
- rc = info->netdev->hard_start_xmit(skb, info->netdev);
- if (rc) {
- skb_pull(skb, sizeof(info->mcr) + sizeof(__u32));
- if (skb->len > 0)
- skb_queue_head(&info->tx_queue, skb);
- else
- kfree_skb(skb);
- } else {
- struct tty_struct *tty = info->tty;
-
- info->flags &= ~CTC_ASYNC_TX_LINESTAT;
- if (tty) {
- tty_wakeup(tty);
- }
- }
- return (skb_queue_empty(&info->tx_queue) ? 0 : 1);
-}
-
-/************************************************************
- *
- * Modem-functions
- *
- * mostly "stolen" from original Linux-serial.c and friends.
- *
- ************************************************************/
-
-static inline int
-ctc_tty_paranoia_check(ctc_tty_info * info, char *name, const char *routine)
-{
-#ifdef MODEM_PARANOIA_CHECK
- if (!info) {
- printk(KERN_WARNING "ctc_tty: null info_struct for %s in %s\n",
- name, routine);
- return 1;
- }
- if (info->magic != CTC_ASYNC_MAGIC) {
- printk(KERN_WARNING "ctc_tty: bad magic for info struct %s in %s\n",
- name, routine);
- return 1;
- }
-#endif
- return 0;
-}
-
-static void
-ctc_tty_inject(ctc_tty_info *info, char c)
-{
- int skb_res;
- struct sk_buff *skb;
-
- DBF_TEXT(trace, 4, __FUNCTION__);
- if (ctc_tty_shuttingdown)
- return;
- skb_res = info->netdev->hard_header_len + sizeof(info->mcr) +
- sizeof(__u32) + 1;
- skb = dev_alloc_skb(skb_res);
- if (!skb) {
- printk(KERN_WARNING
- "ctc_tty: Out of memory in %s%d tx_inject\n",
- CTC_TTY_NAME, info->line);
- return;
- }
- skb_reserve(skb, skb_res);
- *(skb_put(skb, 1)) = c;
- skb_queue_head(&info->tx_queue, skb);
- tasklet_schedule(&info->tasklet);
-}
-
-static void
-ctc_tty_transmit_status(ctc_tty_info *info)
-{
- DBF_TEXT(trace, 5, __FUNCTION__);
- if (ctc_tty_shuttingdown)
- return;
- info->flags |= CTC_ASYNC_TX_LINESTAT;
- tasklet_schedule(&info->tasklet);
-}
-
-static void
-ctc_tty_change_speed(ctc_tty_info * info)
-{
- unsigned int cflag;
- unsigned int quot;
- int i;
-
- DBF_TEXT(trace, 3, __FUNCTION__);
- if (!info->tty || !info->tty->termios)
- return;
- cflag = info->tty->termios->c_cflag;
-
- quot = i = cflag & CBAUD;
- if (i & CBAUDEX) {
- i &= ~CBAUDEX;
- if (i < 1 || i > 2)
- info->tty->termios->c_cflag &= ~CBAUDEX;
- else
- i += 15;
- }
- if (quot) {
- info->mcr |= UART_MCR_DTR;
- info->mcr |= UART_MCR_RTS;
- ctc_tty_transmit_status(info);
- } else {
- info->mcr &= ~UART_MCR_DTR;
- info->mcr &= ~UART_MCR_RTS;
- ctc_tty_transmit_status(info);
- return;
- }
-
- /* CTS flow control flag and modem status interrupts */
- if (cflag & CRTSCTS) {
- info->flags |= CTC_ASYNC_CTS_FLOW;
- } else
- info->flags &= ~CTC_ASYNC_CTS_FLOW;
- if (cflag & CLOCAL)
- info->flags &= ~CTC_ASYNC_CHECK_CD;
- else {
- info->flags |= CTC_ASYNC_CHECK_CD;
- }
-}
-
-static int
-ctc_tty_startup(ctc_tty_info * info)
-{
- DBF_TEXT(trace, 3, __FUNCTION__);
- if (info->flags & CTC_ASYNC_INITIALIZED)
- return 0;
-#ifdef CTC_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "starting up %s%d ...\n", CTC_TTY_NAME, info->line);
-#endif
- /*
- * Now, initialize the UART
- */
- info->mcr = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2;
- if (info->tty)
- clear_bit(TTY_IO_ERROR, &info->tty->flags);
- /*
- * and set the speed of the serial port
- */
- ctc_tty_change_speed(info);
-
- info->flags |= CTC_ASYNC_INITIALIZED;
- if (!(info->flags & CTC_ASYNC_NETDEV_OPEN))
- info->netdev->open(info->netdev);
- info->flags |= CTC_ASYNC_NETDEV_OPEN;
- return 0;
-}
-
-static void
-ctc_tty_stopdev(unsigned long data)
-{
- ctc_tty_info *info = (ctc_tty_info *)data;
-
- if ((!info) || (!info->netdev) ||
- (info->flags & CTC_ASYNC_INITIALIZED))
- return;
- info->netdev->stop(info->netdev);
- info->flags &= ~CTC_ASYNC_NETDEV_OPEN;
-}
-
-/*
- * This routine will shutdown a serial port; interrupts are disabled, and
- * DTR is dropped if the hangup on close termio flag is on.
- */
-static void
-ctc_tty_shutdown(ctc_tty_info * info)
-{
- DBF_TEXT(trace, 3, __FUNCTION__);
- if (!(info->flags & CTC_ASYNC_INITIALIZED))
- return;
-#ifdef CTC_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "Shutting down %s%d ....\n", CTC_TTY_NAME, info->line);
-#endif
- info->msr &= ~UART_MSR_RI;
- if (!info->tty || (info->tty->termios->c_cflag & HUPCL))
- info->mcr &= ~(UART_MCR_DTR | UART_MCR_RTS);
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
- mod_timer(&info->stoptimer, jiffies + (10 * HZ));
- skb_queue_purge(&info->tx_queue);
- skb_queue_purge(&info->rx_queue);
- info->flags &= ~CTC_ASYNC_INITIALIZED;
-}
-
-/* ctc_tty_write() is the main send-routine. It is called from the upper
- * levels within the kernel to perform sending data. Depending on the
- * online-flag it either directs output to the at-command-interpreter or
- * to the lower level. Additional tasks done here:
- * - If online, check for escape-sequence (+++)
- * - If sending audio-data, call ctc_tty_DLEdown() to parse DLE-codes.
- * - If receiving audio-data, call ctc_tty_end_vrx() to abort if needed.
- * - If dialing, abort dial.
- */
-static int
-ctc_tty_write(struct tty_struct *tty, const u_char * buf, int count)
-{
- int c;
- int total = 0;
- ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-
- DBF_TEXT(trace, 5, __FUNCTION__);
- if (ctc_tty_shuttingdown)
- goto ex;
- if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write"))
- goto ex;
- if (!tty)
- goto ex;
- if (!info->netdev) {
- total = -ENODEV;
- goto ex;
- }
- while (1) {
- struct sk_buff *skb;
- int skb_res;
-
- c = (count < CTC_TTY_XMIT_SIZE) ? count : CTC_TTY_XMIT_SIZE;
- if (c <= 0)
- break;
-
- skb_res = info->netdev->hard_header_len + sizeof(info->mcr) +
- + sizeof(__u32);
- skb = dev_alloc_skb(skb_res + c);
- if (!skb) {
- printk(KERN_WARNING
- "ctc_tty: Out of memory in %s%d write\n",
- CTC_TTY_NAME, info->line);
- break;
- }
- skb_reserve(skb, skb_res);
- memcpy(skb_put(skb, c), buf, c);
- skb_queue_tail(&info->tx_queue, skb);
- buf += c;
- total += c;
- count -= c;
- }
- if (!skb_queue_empty(&info->tx_queue)) {
- info->lsr &= ~UART_LSR_TEMT;
- tasklet_schedule(&info->tasklet);
- }
-ex:
- DBF_TEXT(trace, 6, __FUNCTION__);
- return total;
-}
-
-static int
-ctc_tty_write_room(struct tty_struct *tty)
-{
- ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-
- if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write_room"))
- return 0;
- return CTC_TTY_XMIT_SIZE;
-}
-
-static int
-ctc_tty_chars_in_buffer(struct tty_struct *tty)
-{
- ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-
- if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_chars_in_buffer"))
- return 0;
- return 0;
-}
-
-static void
-ctc_tty_flush_buffer(struct tty_struct *tty)
-{
- ctc_tty_info *info;
- unsigned long flags;
-
- DBF_TEXT(trace, 4, __FUNCTION__);
- if (!tty)
- goto ex;
- spin_lock_irqsave(&ctc_tty_lock, flags);
- info = (ctc_tty_info *) tty->driver_data;
- if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_buffer")) {
- spin_unlock_irqrestore(&ctc_tty_lock, flags);
- goto ex;
- }
- skb_queue_purge(&info->tx_queue);
- info->lsr |= UART_LSR_TEMT;
- spin_unlock_irqrestore(&ctc_tty_lock, flags);
- wake_up_interruptible(&tty->write_wait);
- tty_wakeup(tty);
-ex:
- DBF_TEXT_(trace, 2, "ex: %s ", __FUNCTION__);
- return;
-}
-
-static void
-ctc_tty_flush_chars(struct tty_struct *tty)
-{
- ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-
- DBF_TEXT(trace, 4, __FUNCTION__);
- if (ctc_tty_shuttingdown)
- return;
- if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_chars"))
- return;
- if (tty->stopped || tty->hw_stopped || skb_queue_empty(&info->tx_queue))
- return;
- tasklet_schedule(&info->tasklet);
-}
-
-/*
- * ------------------------------------------------------------
- * ctc_tty_throttle()
- *
- * This routine is called by the upper-layer tty layer to signal that
- * incoming characters should be throttled.
- * ------------------------------------------------------------
- */
-static void
-ctc_tty_throttle(struct tty_struct *tty)
-{
- ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-
- DBF_TEXT(trace, 4, __FUNCTION__);
- if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_throttle"))
- return;
- info->mcr &= ~UART_MCR_RTS;
- if (I_IXOFF(tty))
- ctc_tty_inject(info, STOP_CHAR(tty));
- ctc_tty_transmit_status(info);
-}
-
-static void
-ctc_tty_unthrottle(struct tty_struct *tty)
-{
- ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-
- DBF_TEXT(trace, 4, __FUNCTION__);
- if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_unthrottle"))
- return;
- info->mcr |= UART_MCR_RTS;
- if (I_IXOFF(tty))
- ctc_tty_inject(info, START_CHAR(tty));
- ctc_tty_transmit_status(info);
-}
-
-/*
- * ------------------------------------------------------------
- * ctc_tty_ioctl() and friends
- * ------------------------------------------------------------
- */
-
-/*
- * ctc_tty_get_lsr_info - get line status register info
- *
- * Purpose: Let user call ioctl() to get info when the UART physically
- * is emptied. On bus types like RS485, the transmitter must
- * release the bus after transmitting. This must be done when
- * the transmit shift register is empty, not be done when the
- * transmit holding register is empty. This functionality
- * allows RS485 driver to be written in user space.
- */
-static int
-ctc_tty_get_lsr_info(ctc_tty_info * info, uint __user *value)
-{
- u_char status;
- uint result;
- ulong flags;
-
- DBF_TEXT(trace, 4, __FUNCTION__);
- spin_lock_irqsave(&ctc_tty_lock, flags);
- status = info->lsr;
- spin_unlock_irqrestore(&ctc_tty_lock, flags);
- result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
- put_user(result, value);
- return 0;
-}
-
-
-static int ctc_tty_tiocmget(struct tty_struct *tty, struct file *file)
-{
- ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
- u_char control,
- status;
- uint result;
- ulong flags;
-
- DBF_TEXT(trace, 4, __FUNCTION__);
- if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
- return -ENODEV;
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
-
- control = info->mcr;
- spin_lock_irqsave(&ctc_tty_lock, flags);
- status = info->msr;
- spin_unlock_irqrestore(&ctc_tty_lock, flags);
- result = ((control & UART_MCR_RTS) ? TIOCM_RTS : 0)
- | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0)
- | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0)
- | ((status & UART_MSR_RI) ? TIOCM_RNG : 0)
- | ((status & UART_MSR_DSR) ? TIOCM_DSR : 0)
- | ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
- return result;
-}
-
-static int
-ctc_tty_tiocmset(struct tty_struct *tty, struct file *file,
- unsigned int set, unsigned int clear)
-{
- ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-
- DBF_TEXT(trace, 4, __FUNCTION__);
- if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
- return -ENODEV;
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
-
- if (set & TIOCM_RTS)
- info->mcr |= UART_MCR_RTS;
- if (set & TIOCM_DTR)
- info->mcr |= UART_MCR_DTR;
-
- if (clear & TIOCM_RTS)
- info->mcr &= ~UART_MCR_RTS;
- if (clear & TIOCM_DTR)
- info->mcr &= ~UART_MCR_DTR;
-
- if ((set | clear) & (TIOCM_RTS|TIOCM_DTR))
- ctc_tty_transmit_status(info);
- return 0;
-}
-
-static int
-ctc_tty_ioctl(struct tty_struct *tty, struct file *file,
- uint cmd, ulong arg)
-{
- ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
- int error;
- int retval;
-
- DBF_TEXT(trace, 4, __FUNCTION__);
- if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
- return -ENODEV;
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
- switch (cmd) {
- case TCSBRK: /* SVID version: non-zero arg --> no break */
-#ifdef CTC_DEBUG_MODEM_IOCTL
- printk(KERN_DEBUG "%s%d ioctl TCSBRK\n", CTC_TTY_NAME, info->line);
-#endif
- retval = tty_check_change(tty);
- if (retval)
- return retval;
- tty_wait_until_sent(tty, 0);
- return 0;
- case TCSBRKP: /* support for POSIX tcsendbreak() */
-#ifdef CTC_DEBUG_MODEM_IOCTL
- printk(KERN_DEBUG "%s%d ioctl TCSBRKP\n", CTC_TTY_NAME, info->line);
-#endif
- retval = tty_check_change(tty);
- if (retval)
- return retval;
- tty_wait_until_sent(tty, 0);
- return 0;
- case TIOCGSOFTCAR:
-#ifdef CTC_DEBUG_MODEM_IOCTL
- printk(KERN_DEBUG "%s%d ioctl TIOCGSOFTCAR\n", CTC_TTY_NAME,
- info->line);
-#endif
- error = put_user(C_CLOCAL(tty) ? 1 : 0, (ulong __user *) arg);
- return error;
- case TIOCSSOFTCAR:
-#ifdef CTC_DEBUG_MODEM_IOCTL
- printk(KERN_DEBUG "%s%d ioctl TIOCSSOFTCAR\n", CTC_TTY_NAME,
- info->line);
-#endif
- error = get_user(arg, (ulong __user *) arg);
- if (error)
- return error;
- tty->termios->c_cflag =
- ((tty->termios->c_cflag & ~CLOCAL) |
- (arg ? CLOCAL : 0));
- return 0;
- case TIOCSERGETLSR: /* Get line status register */
-#ifdef CTC_DEBUG_MODEM_IOCTL
- printk(KERN_DEBUG "%s%d ioctl TIOCSERGETLSR\n", CTC_TTY_NAME,
- info->line);
-#endif
- if (access_ok(VERIFY_WRITE, (void __user *) arg, sizeof(uint)))
- return ctc_tty_get_lsr_info(info, (uint __user *) arg);
- else
- return -EFAULT;
- default:
-#ifdef CTC_DEBUG_MODEM_IOCTL
- printk(KERN_DEBUG "UNKNOWN ioctl 0x%08x on %s%d\n", cmd,
- CTC_TTY_NAME, info->line);
-#endif
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-
-static void
-ctc_tty_set_termios(struct tty_struct *tty, struct termios *old_termios)
-{
- ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
- unsigned int cflag = tty->termios->c_cflag;
-
- DBF_TEXT(trace, 4, __FUNCTION__);
- ctc_tty_change_speed(info);
-
- /* Handle transition to B0 */
- if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) {
- info->mcr &= ~(UART_MCR_DTR|UART_MCR_RTS);
- ctc_tty_transmit_status(info);
- }
-
- /* Handle transition from B0 to other */
- if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
- info->mcr |= UART_MCR_DTR;
- if (!(tty->termios->c_cflag & CRTSCTS) ||
- !test_bit(TTY_THROTTLED, &tty->flags)) {
- info->mcr |= UART_MCR_RTS;
- }
- ctc_tty_transmit_status(info);
- }
-
- /* Handle turning off CRTSCTS */
- if ((old_termios->c_cflag & CRTSCTS) &&
- !(tty->termios->c_cflag & CRTSCTS))
- tty->hw_stopped = 0;
-}
-
-/*
- * ------------------------------------------------------------
- * ctc_tty_open() and friends
- * ------------------------------------------------------------
- */
-static int
-ctc_tty_block_til_ready(struct tty_struct *tty, struct file *filp, ctc_tty_info *info)
-{
- DECLARE_WAITQUEUE(wait, NULL);
- int do_clocal = 0;
- unsigned long flags;
- int retval;
-
- DBF_TEXT(trace, 4, __FUNCTION__);
- /*
- * If the device is in the middle of being closed, then block
- * until it's done, and then try again.
- */
- if (tty_hung_up_p(filp) ||
- (info->flags & CTC_ASYNC_CLOSING)) {
- if (info->flags & CTC_ASYNC_CLOSING)
- wait_event(info->close_wait,
- !(info->flags & CTC_ASYNC_CLOSING));
-#ifdef MODEM_DO_RESTART
- if (info->flags & CTC_ASYNC_HUP_NOTIFY)
- return -EAGAIN;
- else
- return -ERESTARTSYS;
-#else
- return -EAGAIN;
-#endif
- }
- /*
- * If non-blocking mode is set, then make the check up front
- * and then exit.
- */
- if ((filp->f_flags & O_NONBLOCK) ||
- (tty->flags & (1 << TTY_IO_ERROR))) {
- info->flags |= CTC_ASYNC_NORMAL_ACTIVE;
- return 0;
- }
- if (tty->termios->c_cflag & CLOCAL)
- do_clocal = 1;
- /*
- * Block waiting for the carrier detect and the line to become
- * free (i.e., not in use by the callout). While we are in
- * this loop, info->count is dropped by one, so that
- * ctc_tty_close() knows when to free things. We restore it upon
- * exit, either normal or abnormal.
- */
- retval = 0;
- add_wait_queue(&info->open_wait, &wait);
-#ifdef CTC_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "ctc_tty_block_til_ready before block: %s%d, count = %d\n",
- CTC_TTY_NAME, info->line, info->count);
-#endif
- spin_lock_irqsave(&ctc_tty_lock, flags);
- if (!(tty_hung_up_p(filp)))
- info->count--;
- spin_unlock_irqrestore(&ctc_tty_lock, flags);
- info->blocked_open++;
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (tty_hung_up_p(filp) ||
- !(info->flags & CTC_ASYNC_INITIALIZED)) {
-#ifdef MODEM_DO_RESTART
- if (info->flags & CTC_ASYNC_HUP_NOTIFY)
- retval = -EAGAIN;
- else
- retval = -ERESTARTSYS;
-#else
- retval = -EAGAIN;
-#endif
- break;
- }
- if (!(info->flags & CTC_ASYNC_CLOSING) &&
- (do_clocal || (info->msr & UART_MSR_DCD))) {
- break;
- }
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
-#ifdef CTC_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "ctc_tty_block_til_ready blocking: %s%d, count = %d\n",
- CTC_TTY_NAME, info->line, info->count);
-#endif
- schedule();
- }
- current->state = TASK_RUNNING;
- remove_wait_queue(&info->open_wait, &wait);
- if (!tty_hung_up_p(filp))
- info->count++;
- info->blocked_open--;
-#ifdef CTC_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "ctc_tty_block_til_ready after blocking: %s%d, count = %d\n",
- CTC_TTY_NAME, info->line, info->count);
-#endif
- if (retval)
- return retval;
- info->flags |= CTC_ASYNC_NORMAL_ACTIVE;
- return 0;
-}
-
-/*
- * This routine is called whenever a serial port is opened. It
- * enables interrupts for a serial port, linking in its async structure into
- * the IRQ chain. It also performs the serial-specific
- * initialization for the tty structure.
- */
-static int
-ctc_tty_open(struct tty_struct *tty, struct file *filp)
-{
- ctc_tty_info *info;
- unsigned long saveflags;
- int retval,
- line;
-
- DBF_TEXT(trace, 3, __FUNCTION__);
- line = tty->index;
- if (line < 0 || line > CTC_TTY_MAX_DEVICES)
- return -ENODEV;
- info = &driver->info[line];
- if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_open"))
- return -ENODEV;
- if (!info->netdev)
- return -ENODEV;
-#ifdef CTC_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "ctc_tty_open %s, count = %d\n", tty->name,
- info->count);
-#endif
- spin_lock_irqsave(&ctc_tty_lock, saveflags);
- info->count++;
- tty->driver_data = info;
- info->tty = tty;
- spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
- /*
- * Start up serial port
- */
- retval = ctc_tty_startup(info);
- if (retval) {
-#ifdef CTC_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "ctc_tty_open return after startup\n");
-#endif
- return retval;
- }
- retval = ctc_tty_block_til_ready(tty, filp, info);
- if (retval) {
-#ifdef CTC_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "ctc_tty_open return after ctc_tty_block_til_ready \n");
-#endif
- return retval;
- }
-#ifdef CTC_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "ctc_tty_open %s successful...\n", tty->name);
-#endif
- return 0;
-}
-
-static void
-ctc_tty_close(struct tty_struct *tty, struct file *filp)
-{
- ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
- ulong flags;
- ulong timeout;
- DBF_TEXT(trace, 3, __FUNCTION__);
- if (!info || ctc_tty_paranoia_check(info, tty->name, "ctc_tty_close"))
- return;
- spin_lock_irqsave(&ctc_tty_lock, flags);
- if (tty_hung_up_p(filp)) {
- spin_unlock_irqrestore(&ctc_tty_lock, flags);
-#ifdef CTC_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "ctc_tty_close return after tty_hung_up_p\n");
-#endif
- return;
- }
- if ((tty->count == 1) && (info->count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. Info->count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
- * serial port won't be shutdown.
- */
- printk(KERN_ERR "ctc_tty_close: bad port count; tty->count is 1, "
- "info->count is %d\n", info->count);
- info->count = 1;
- }
- if (--info->count < 0) {
- printk(KERN_ERR "ctc_tty_close: bad port count for %s%d: %d\n",
- CTC_TTY_NAME, info->line, info->count);
- info->count = 0;
- }
- if (info->count) {
- local_irq_restore(flags);
-#ifdef CTC_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "ctc_tty_close after info->count != 0\n");
-#endif
- return;
- }
- info->flags |= CTC_ASYNC_CLOSING;
- tty->closing = 1;
- /*
- * At this point we stop accepting input. To do this, we
- * disable the receive line status interrupts, and tell the
- * interrupt driver to stop checking the data ready bit in the
- * line status register.
- */
- if (info->flags & CTC_ASYNC_INITIALIZED) {
- tty_wait_until_sent(tty, 30*HZ); /* 30 seconds timeout */
- /*
- * Before we drop DTR, make sure the UART transmitter
- * has completely drained; this is especially
- * important if there is a transmit FIFO!
- */
- timeout = jiffies + HZ;
- while (!(info->lsr & UART_LSR_TEMT)) {
- spin_unlock_irqrestore(&ctc_tty_lock, flags);
- msleep(500);
- spin_lock_irqsave(&ctc_tty_lock, flags);
- if (time_after(jiffies,timeout))
- break;
- }
- }
- ctc_tty_shutdown(info);
- if (tty->driver->flush_buffer) {
- skb_queue_purge(&info->tx_queue);
- info->lsr |= UART_LSR_TEMT;
- }
- tty_ldisc_flush(tty);
- info->tty = 0;
- tty->closing = 0;
- if (info->blocked_open) {
- msleep_interruptible(500);
- wake_up_interruptible(&info->open_wait);
- }
- info->flags &= ~(CTC_ASYNC_NORMAL_ACTIVE | CTC_ASYNC_CLOSING);
- wake_up_interruptible(&info->close_wait);
- spin_unlock_irqrestore(&ctc_tty_lock, flags);
-#ifdef CTC_DEBUG_MODEM_OPEN
- printk(KERN_DEBUG "ctc_tty_close normal exit\n");
-#endif
-}
-
-/*
- * ctc_tty_hangup() --- called by tty_hangup() when a hangup is signaled.
- */
-static void
-ctc_tty_hangup(struct tty_struct *tty)
-{
- ctc_tty_info *info = (ctc_tty_info *)tty->driver_data;
- unsigned long saveflags;
- DBF_TEXT(trace, 3, __FUNCTION__);
- if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_hangup"))
- return;
- ctc_tty_shutdown(info);
- info->count = 0;
- info->flags &= ~CTC_ASYNC_NORMAL_ACTIVE;
- spin_lock_irqsave(&ctc_tty_lock, saveflags);
- info->tty = 0;
- spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
- wake_up_interruptible(&info->open_wait);
-}
-
-
-/*
- * For all online tty's, try sending data to
- * the lower levels.
- */
-static void
-ctc_tty_task(unsigned long arg)
-{
- ctc_tty_info *info = (void *)arg;
- unsigned long saveflags;
- int again;
-
- DBF_TEXT(trace, 3, __FUNCTION__);
- spin_lock_irqsave(&ctc_tty_lock, saveflags);
- if ((!ctc_tty_shuttingdown) && info) {
- again = ctc_tty_tint(info);
- if (!again)
- info->lsr |= UART_LSR_TEMT;
- again |= ctc_tty_readmodem(info);
- if (again) {
- tasklet_schedule(&info->tasklet);
- }
- }
- spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
-}
-
-static struct tty_operations ctc_ops = {
- .open = ctc_tty_open,
- .close = ctc_tty_close,
- .write = ctc_tty_write,
- .flush_chars = ctc_tty_flush_chars,
- .write_room = ctc_tty_write_room,
- .chars_in_buffer = ctc_tty_chars_in_buffer,
- .flush_buffer = ctc_tty_flush_buffer,
- .ioctl = ctc_tty_ioctl,
- .throttle = ctc_tty_throttle,
- .unthrottle = ctc_tty_unthrottle,
- .set_termios = ctc_tty_set_termios,
- .hangup = ctc_tty_hangup,
- .tiocmget = ctc_tty_tiocmget,
- .tiocmset = ctc_tty_tiocmset,
-};
-
-int
-ctc_tty_init(void)
-{
- int i;
- ctc_tty_info *info;
- struct tty_driver *device;
-
- DBF_TEXT(trace, 2, __FUNCTION__);
- driver = kmalloc(sizeof(ctc_tty_driver), GFP_KERNEL);
- if (driver == NULL) {
- printk(KERN_WARNING "Out of memory in ctc_tty_modem_init\n");
- return -ENOMEM;
- }
- memset(driver, 0, sizeof(ctc_tty_driver));
- device = alloc_tty_driver(CTC_TTY_MAX_DEVICES);
- if (!device) {
- kfree(driver);
- printk(KERN_WARNING "Out of memory in ctc_tty_modem_init\n");
- return -ENOMEM;
- }
-
- device->devfs_name = "ctc/" CTC_TTY_NAME;
- device->name = CTC_TTY_NAME;
- device->major = CTC_TTY_MAJOR;
- device->minor_start = 0;
- device->type = TTY_DRIVER_TYPE_SERIAL;
- device->subtype = SERIAL_TYPE_NORMAL;
- device->init_termios = tty_std_termios;
- device->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- device->flags = TTY_DRIVER_REAL_RAW;
- device->driver_name = "ctc_tty",
- tty_set_operations(device, &ctc_ops);
- if (tty_register_driver(device)) {
- printk(KERN_WARNING "ctc_tty: Couldn't register serial-device\n");
- put_tty_driver(device);
- kfree(driver);
- return -1;
- }
- driver->ctc_tty_device = device;
- for (i = 0; i < CTC_TTY_MAX_DEVICES; i++) {
- info = &driver->info[i];
- init_MUTEX(&info->write_sem);
- tasklet_init(&info->tasklet, ctc_tty_task,
- (unsigned long) info);
- info->magic = CTC_ASYNC_MAGIC;
- info->line = i;
- info->tty = 0;
- info->count = 0;
- info->blocked_open = 0;
- init_waitqueue_head(&info->open_wait);
- init_waitqueue_head(&info->close_wait);
- skb_queue_head_init(&info->tx_queue);
- skb_queue_head_init(&info->rx_queue);
- init_timer(&info->stoptimer);
- info->stoptimer.function = ctc_tty_stopdev;
- info->stoptimer.data = (unsigned long)info;
- info->mcr = UART_MCR_RTS;
- }
- return 0;
-}
-
-int
-ctc_tty_register_netdev(struct net_device *dev) {
- int ttynum;
- char *err;
- char *p;
-
- DBF_TEXT(trace, 2, __FUNCTION__);
- if ((!dev) || (!dev->name)) {
- printk(KERN_WARNING
- "ctc_tty_register_netdev called "
- "with NULL dev or NULL dev-name\n");
- return -1;
- }
-
- /*
- * If the name is a format string the caller wants us to
- * do a name allocation : format string must end with %d
- */
- if (strchr(dev->name, '%'))
- {
- int err = dev_alloc_name(dev, dev->name); // dev->name is changed by this
- if (err < 0) {
- printk(KERN_DEBUG "dev_alloc returned error %d\n", err);
- return err;
- }
-
- }
-
- for (p = dev->name; p && ((*p < '0') || (*p > '9')); p++);
- ttynum = simple_strtoul(p, &err, 0);
- if ((ttynum < 0) || (ttynum >= CTC_TTY_MAX_DEVICES) ||
- (err && *err)) {
- printk(KERN_WARNING
- "ctc_tty_register_netdev called "
- "with number in name '%s'\n", dev->name);
- return -1;
- }
- if (driver->info[ttynum].netdev) {
- printk(KERN_WARNING
- "ctc_tty_register_netdev called "
- "for already registered device '%s'\n",
- dev->name);
- return -1;
- }
- driver->info[ttynum].netdev = dev;
- return 0;
-}
-
-void
-ctc_tty_unregister_netdev(struct net_device *dev) {
- int i;
- unsigned long saveflags;
- ctc_tty_info *info = NULL;
-
- DBF_TEXT(trace, 2, __FUNCTION__);
- spin_lock_irqsave(&ctc_tty_lock, saveflags);
- for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
- if (driver->info[i].netdev == dev) {
- info = &driver->info[i];
- break;
- }
- if (info) {
- info->netdev = NULL;
- skb_queue_purge(&info->tx_queue);
- skb_queue_purge(&info->rx_queue);
- }
- spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
-}
-
-void
-ctc_tty_cleanup(void) {
- unsigned long saveflags;
-
- DBF_TEXT(trace, 2, __FUNCTION__);
- spin_lock_irqsave(&ctc_tty_lock, saveflags);
- ctc_tty_shuttingdown = 1;
- spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
- tty_unregister_driver(driver->ctc_tty_device);
- put_tty_driver(driver->ctc_tty_device);
- kfree(driver);
- driver = NULL;
-}
diff --git a/drivers/s390/net/ctctty.h b/drivers/s390/net/ctctty.h
deleted file mode 100644
index 7254dc006311..000000000000
--- a/drivers/s390/net/ctctty.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * CTC / ESCON network driver, tty interface.
- *
- * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef _CTCTTY_H_
-#define _CTCTTY_H_
-
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-
-extern int ctc_tty_register_netdev(struct net_device *);
-extern void ctc_tty_unregister_netdev(struct net_device *);
-extern void ctc_tty_netif_rx(struct sk_buff *);
-extern int ctc_tty_init(void);
-extern void ctc_tty_cleanup(void);
-extern void ctc_tty_setcarrier(struct net_device *, int);
-
-#endif
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index b046ffa22101..de9ba7890b5a 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -4297,6 +4297,7 @@ static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
{
if (ap->flags & ATA_FLAG_SUSPENDED) {
+ ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000);
ap->flags &= ~ATA_FLAG_SUSPENDED;
ata_set_mode(ap);
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index fee843fab1c7..108910f512e4 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -982,6 +982,12 @@ static int device_check(ppa_struct *dev)
return -ENODEV;
}
+static int ppa_adjust_queue(struct scsi_device *device)
+{
+ blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
+ return 0;
+}
+
static struct scsi_host_template ppa_template = {
.module = THIS_MODULE,
.proc_name = "ppa",
@@ -997,6 +1003,7 @@ static struct scsi_host_template ppa_template = {
.cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
.can_queue = 1,
+ .slave_alloc = ppa_adjust_queue,
};
/***************************************************************************
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 9b8bca1ac1f0..f16f92a6ec0f 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -2035,6 +2035,7 @@ static void mv_phy_reset(struct ata_port *ap)
static void mv_eng_timeout(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
+ unsigned long flags;
printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
DPRINTK("All regs @ start of eng_timeout\n");
@@ -2046,8 +2047,10 @@ static void mv_eng_timeout(struct ata_port *ap)
ap->host_set->mmio_base, ap, qc, qc->scsicmd,
&qc->scsicmd->cmnd);
+ spin_lock_irqsave(&ap->host_set->lock, flags);
mv_err_intr(ap, 0);
mv_stop_and_reset(ap);
+ spin_unlock_irqrestore(&ap->host_set->lock, flags);
WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
if (qc->flags & ATA_QCFLAG_ACTIVE) {
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index f7264fd611c2..cb9082fd7e2f 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -454,7 +454,7 @@ static int sil24_softreset(struct ata_port *ap, int verbose,
*/
msleep(10);
- prb->ctrl = PRB_CTRL_SRST;
+ prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
prb->fis[1] = 0; /* no PM yet */
writel((u32)paddr, port + PORT_CMD_ACTIVATE);
@@ -551,9 +551,9 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
if (qc->tf.flags & ATA_TFLAG_WRITE)
- prb->ctrl = PRB_CTRL_PACKET_WRITE;
+ prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_WRITE);
else
- prb->ctrl = PRB_CTRL_PACKET_READ;
+ prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_READ);
} else
prb->ctrl = 0;
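
The sata_sil24 hunks above wrap the stores to prb->ctrl in cpu_to_le16() so the little-endian hardware field ends up byte-swapped correctly on big-endian hosts. The underlying idea is ordinary fixed-endianness serialization; below is a minimal standalone sketch using a hand-rolled put_le16() helper and an arbitrary flag value, not the kernel macro or the real PRB_CTRL_* constants.

#include <stdint.h>
#include <stdio.h>

/* Store a 16-bit value in little-endian byte order regardless of host endianness. */
static void put_le16(uint8_t *dst, uint16_t val)
{
    dst[0] = (uint8_t)(val & 0xff);          /* low byte first */
    dst[1] = (uint8_t)((val >> 8) & 0xff);   /* high byte second */
}

int main(void)
{
    uint8_t ctrl[2];

    put_le16(ctrl, 0x0040);                  /* arbitrary flag value for illustration */
    printf("%02x %02x\n", ctrl[0], ctrl[1]); /* prints "40 00" on any host */
    return 0;
}
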
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 941c1e15c899..62f8cb7b3d2b 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -165,6 +165,7 @@ static struct {
{"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
{"HP", "C1557A", NULL, BLIST_FORCELUN},
{"HP", "C3323-300", "4269", BLIST_NOTQ},
+ {"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
{"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
{"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 764a8b375ead..faee4757c03a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -367,7 +367,7 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
int nsegs, unsigned bufflen, gfp_t gfp)
{
struct request_queue *q = rq->q;
- int nr_pages = (bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned int data_len = 0, len, bytes, off;
struct page *page;
struct bio *bio = NULL;
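
The scsi_req_map_sg() fix above includes the offset of the first scatterlist element when sizing the bio: a buffer that starts part-way into a page can span one more page than its length alone suggests. A small illustration of the arithmetic, assuming a fixed 4 KiB page and written as plain C rather than kernel code:

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

/* Pages touched by a buffer of 'len' bytes that starts 'offset' bytes
 * into its first page. */
static unsigned long nr_pages(unsigned long offset, unsigned long len)
{
    return (len + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
    printf("%lu\n", nr_pages(0, 4096));   /* 1: page-aligned buffer fits in one page */
    printf("%lu\n", nr_pages(512, 4096)); /* 2: same length, unaligned, spills over */
    return 0;
}
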
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 8b6d65e21bae..f3b16066387c 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -955,7 +955,8 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
list_for_each_entry(rphy, &sas_host->rphy_list, list) {
struct sas_phy *parent = dev_to_phy(rphy->dev.parent);
- if (rphy->scsi_target_id == -1)
+ if (rphy->identify.device_type != SAS_END_DEVICE ||
+ rphy->scsi_target_id == -1)
continue;
if ((channel == SCAN_WILD_CARD || channel == parent->port_identifier) &&
@@ -977,7 +978,6 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
#define SETUP_TEMPLATE(attrb, field, perm, test) \
i->private_##attrb[count] = class_device_attr_##field; \
i->private_##attrb[count].attr.mode = perm; \
- i->private_##attrb[count].store = NULL; \
i->attrb[count] = &i->private_##attrb[count]; \
if (test) \
count++
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index acde8868da21..fafe7c1265b3 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -185,6 +185,9 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
/* Select Power Management Mode */
pxa27x_ohci_select_pmm(inf->port_mode);
+ if (inf->power_budget)
+ hcd->power_budget = inf->power_budget;
+
ohci_hcd_init(hcd_to_ohci(hcd));
retval = usb_add_hcd(hcd, pdev->resource[1].start, SA_INTERRUPT);
diff --git a/drivers/usb/net/Kconfig b/drivers/usb/net/Kconfig
index efd6ca7e4ac5..054059632a21 100644
--- a/drivers/usb/net/Kconfig
+++ b/drivers/usb/net/Kconfig
@@ -301,21 +301,4 @@ config USB_NET_ZAURUS
some cases CDC MDLM) protocol, not "g_ether".
-config USB_ZD1201
- tristate "USB ZD1201 based Wireless device support"
- depends on NET_RADIO
- select FW_LOADER
- ---help---
- Say Y if you want to use wireless LAN adapters based on the ZyDAS
- ZD1201 chip.
-
- This driver makes the adapter appear as a normal Ethernet interface,
- typically on wlan0.
-
- The zd1201 device requires external firmware to be loaded.
- This can be found at http://linux-lc100020.sourceforge.net/
-
- To compile this driver as a module, choose M here: the
- module will be called zd1201.
-
endmenu
diff --git a/drivers/usb/net/Makefile b/drivers/usb/net/Makefile
index a21e6eaabaf6..160f19dbdf12 100644
--- a/drivers/usb/net/Makefile
+++ b/drivers/usb/net/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o
obj-$(CONFIG_USB_NET_CDC_SUBSET) += cdc_subset.o
obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o
obj-$(CONFIG_USB_USBNET) += usbnet.o
-obj-$(CONFIG_USB_ZD1201) += zd1201.o
ifeq ($(CONFIG_USB_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index 3d04b2def0f1..789450bb0bc9 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -214,10 +214,13 @@ int au1100fb_setmode(struct au1100fb_device *fbdev)
*/
int au1100fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *fbi)
{
- struct au1100fb_device *fbdev = to_au1100fb_device(fbi);
- u32 *palette = fbdev->regs->lcd_pallettebase;
+ struct au1100fb_device *fbdev;
+ u32 *palette;
u32 value;
+ fbdev = to_au1100fb_device(fbi);
+ palette = fbdev->regs->lcd_pallettebase;
+
if (regno > (AU1100_LCD_NBR_PALETTE_ENTRIES - 1))
return -EINVAL;
@@ -316,9 +319,11 @@ int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi)
*/
int au1100fb_fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *fbi)
{
- struct au1100fb_device *fbdev = to_au1100fb_device(fbi);
+ struct au1100fb_device *fbdev;
int dy;
+ fbdev = to_au1100fb_device(fbi);
+
print_dbg("fb_pan_display %p %p", var, fbi);
if (!var || !fbdev) {
@@ -382,10 +387,12 @@ void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
*/
int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
{
- struct au1100fb_device *fbdev = to_au1100fb_device(fbi);
+ struct au1100fb_device *fbdev;
unsigned int len;
unsigned long start=0, off;
+ fbdev = to_au1100fb_device(fbi);
+
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
return -EINVAL;
}
@@ -467,7 +474,7 @@ int au1100fb_drv_probe(struct device *dev)
if (!request_mem_region(au1100fb_fix.mmio_start, au1100fb_fix.mmio_len,
DRIVER_NAME)) {
- print_err("fail to lock memory region at 0x%08x",
+ print_err("fail to lock memory region at 0x%08lx",
au1100fb_fix.mmio_start);
return -EBUSY;
}
@@ -595,13 +602,13 @@ int au1100fb_drv_remove(struct device *dev)
return 0;
}
-int au1100fb_drv_suspend(struct device *dev, u32 state, u32 level)
+int au1100fb_drv_suspend(struct device *dev, pm_message_t state)
{
/* TODO */
return 0;
}
-int au1100fb_drv_resume(struct device *dev, u32 level)
+int au1100fb_drv_resume(struct device *dev)
{
/* TODO */
return 0;
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index ca020719d20b..47ba1a79adcd 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1745,7 +1745,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
fbcon_redraw_move(vc, p, 0, t, count);
ypan_up_redraw(vc, t, count);
if (vc->vc_rows - b > 0)
- fbcon_redraw_move(vc, p, b - count,
+ fbcon_redraw_move(vc, p, b,
vc->vc_rows - b, b);
} else
fbcon_redraw_move(vc, p, t + count, b - t - count, t);
@@ -2631,7 +2631,7 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
scr_memcpyw((u16 *) q, (u16 *) p,
vc->vc_size_row);
}
- softback_in = p;
+ softback_in = softback_curr = p;
update_region(vc, vc->vc_origin,
logo_lines * vc->vc_cols);
}
diff --git a/drivers/video/maxinefb.c b/drivers/video/maxinefb.c
index 743e7ad26acc..f85421bf7cb5 100644
--- a/drivers/video/maxinefb.c
+++ b/drivers/video/maxinefb.c
@@ -55,7 +55,7 @@ static struct fb_var_screeninfo maxinefb_defined = {
};
static struct fb_fix_screeninfo maxinefb_fix = {
- .id = "Maxine onboard graphics 1024x768x8",
+ .id = "Maxine",
.smem_len = (1024*768),
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -107,8 +107,6 @@ static int maxinefb_setcolreg(unsigned regno, unsigned red, unsigned green,
static struct fb_ops maxinefb_ops = {
.owner = THIS_MODULE,
- .fb_get_fix = gen_get_fix,
- .fb_get_var = gen_get_var,
.fb_setcolreg = maxinefb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
diff --git a/fs/bio.c b/fs/bio.c
index 098c12b2d60a..6a0b9ad8f8c9 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -654,9 +654,10 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
write_to_vm, 0, &pages[cur_page], NULL);
up_read(&current->mm->mmap_sem);
- if (ret < local_nr_pages)
+ if (ret < local_nr_pages) {
+ ret = -EFAULT;
goto out_unmap;
-
+ }
offset = uaddr & ~PAGE_MASK;
for (j = cur_page; j < page_limit; j++) {
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 85d166cdcae4..b55b4ea9a676 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -67,12 +67,13 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d
static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
int mode, dev_t dev)
{
- struct inode *inode = debugfs_get_inode(dir->i_sb, mode, dev);
+ struct inode *inode;
int error = -EPERM;
if (dentry->d_inode)
return -EEXIST;
+ inode = debugfs_get_inode(dir->i_sb, mode, dev);
if (inode) {
d_instantiate(dentry, inode);
dget(dentry);
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 8aac5334680d..34b39e9a1e5a 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -767,7 +767,6 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
if (input->group != sbi->s_groups_count) {
ext3_warning(sb, __FUNCTION__,
"multiple resizers run on filesystem!");
- unlock_super(sb);
err = -EBUSY;
goto exit_journal;
}
diff --git a/fs/locks.c b/fs/locks.c
index 6f99c0a6f836..ab61a8b54829 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -755,6 +755,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
if (request->fl_type == F_UNLCK)
goto out;
+ error = -ENOMEM;
new_fl = locks_alloc_lock();
if (new_fl == NULL)
goto out;
@@ -781,6 +782,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
locks_copy_lock(new_fl, request);
locks_insert_lock(&inode->i_flock, new_fl);
new_fl = NULL;
+ error = 0;
out:
unlock_kernel();
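
The flock_lock_file() change above is the usual kernel error-path idiom: preset the error code before each step that can fail, and overwrite it with zero only once the operation has actually succeeded, so every goto out exits with the right status. A generic sketch of that pattern with hypothetical names, not the kernel function itself:

#include <stdlib.h>
#include <errno.h>

struct lock { int type; };

static int take_lock(struct lock **head, int want_lock)
{
    struct lock *new_lock = NULL;
    int error = 0;

    if (!want_lock)
        goto out;                  /* nothing to do: report success */

    error = -ENOMEM;               /* preset the failure code... */
    new_lock = malloc(sizeof(*new_lock));
    if (new_lock == NULL)
        goto out;                  /* ...so this exit reports -ENOMEM */

    *head = new_lock;              /* ownership moves to the caller's list */
    new_lock = NULL;
    error = 0;                     /* only now is the operation a success */
out:
    free(new_lock);                /* free(NULL) is a no-op */
    return error;
}

int main(void)
{
    struct lock *head = NULL;
    int err = take_lock(&head, 1);

    free(head);
    return err ? 1 : 0;
}
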
diff --git a/fs/namei.c b/fs/namei.c
index 96723ae83c89..d6e2ee251736 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1080,8 +1080,8 @@ static int fastcall do_path_lookup(int dfd, const char *name,
nd->flags = flags;
nd->depth = 0;
- read_lock(&current->fs->lock);
if (*name=='/') {
+ read_lock(&current->fs->lock);
if (current->fs->altroot && !(nd->flags & LOOKUP_NOALT)) {
nd->mnt = mntget(current->fs->altrootmnt);
nd->dentry = dget(current->fs->altroot);
@@ -1092,33 +1092,35 @@ static int fastcall do_path_lookup(int dfd, const char *name,
}
nd->mnt = mntget(current->fs->rootmnt);
nd->dentry = dget(current->fs->root);
+ read_unlock(&current->fs->lock);
} else if (dfd == AT_FDCWD) {
+ read_lock(&current->fs->lock);
nd->mnt = mntget(current->fs->pwdmnt);
nd->dentry = dget(current->fs->pwd);
+ read_unlock(&current->fs->lock);
} else {
struct dentry *dentry;
file = fget_light(dfd, &fput_needed);
retval = -EBADF;
if (!file)
- goto unlock_fail;
+ goto out_fail;
dentry = file->f_dentry;
retval = -ENOTDIR;
if (!S_ISDIR(dentry->d_inode->i_mode))
- goto fput_unlock_fail;
+ goto fput_fail;
retval = file_permission(file, MAY_EXEC);
if (retval)
- goto fput_unlock_fail;
+ goto fput_fail;
nd->mnt = mntget(file->f_vfsmnt);
nd->dentry = dget(dentry);
fput_light(file, fput_needed);
}
- read_unlock(&current->fs->lock);
current->total_link_count = 0;
retval = link_path_walk(name, nd);
out:
@@ -1127,13 +1129,12 @@ out:
nd->dentry->d_inode))
audit_inode(name, nd->dentry->d_inode, flags);
}
+out_fail:
return retval;
-fput_unlock_fail:
+fput_fail:
fput_light(file, fput_needed);
-unlock_fail:
- read_unlock(&current->fs->lock);
- return retval;
+ goto out_fail;
}
int fastcall path_lookup(const char *name, unsigned int flags,
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index 9950706abdf8..e1432102be05 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -45,10 +45,8 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
#define hard_smp_processor_id() __hard_smp_processor_id()
#define raw_smp_processor_id() (current_thread_info()->cpu)
-extern cpumask_t cpu_present_mask;
-extern cpumask_t cpu_online_map;
extern int smp_num_cpus;
-#define cpu_possible_map cpu_present_mask
+#define cpu_possible_map cpu_present_map
int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, int wait, cpumask_t cpu);
diff --git a/include/asm-arm/arch-ixp23xx/memory.h b/include/asm-arm/arch-ixp23xx/memory.h
index 6e19f46d54d1..c85fc06a043c 100644
--- a/include/asm-arm/arch-ixp23xx/memory.h
+++ b/include/asm-arm/arch-ixp23xx/memory.h
@@ -49,7 +49,7 @@ static inline int __ixp23xx_arch_is_coherent(void)
{
extern unsigned int processor_id;
- if (((processor_id & 15) >= 2) || machine_is_roadrunner())
+ if (((processor_id & 15) >= 4) || machine_is_roadrunner())
return 1;
return 0;
diff --git a/include/asm-arm/arch-l7200/serial_l7200.h b/include/asm-arm/arch-l7200/serial_l7200.h
index 238c595d97ea..b1008a9d23e5 100644
--- a/include/asm-arm/arch-l7200/serial_l7200.h
+++ b/include/asm-arm/arch-l7200/serial_l7200.h
@@ -28,7 +28,7 @@
#define UARTDR 0x00 /* Tx/Rx data */
#define RXSTAT 0x04 /* Rx status */
#define H_UBRLCR 0x08 /* mode register high */
-#define M_UBRLCR 0x0C /* mode reg mid (MSB of buad)*/
+#define M_UBRLCR 0x0C /* mode reg mid (MSB of baud)*/
#define L_UBRLCR 0x10 /* mode reg low (LSB of baud)*/
#define UARTCON 0x14 /* control register */
#define UARTFLG 0x18 /* flag register */
diff --git a/include/asm-arm/arch-l7200/uncompress.h b/include/asm-arm/arch-l7200/uncompress.h
index 9fcd40aee3e3..04be2a088639 100644
--- a/include/asm-arm/arch-l7200/uncompress.h
+++ b/include/asm-arm/arch-l7200/uncompress.h
@@ -6,7 +6,7 @@
* Changelog:
* 05-01-2000 SJH Created
* 05-13-2000 SJH Filled in function bodies
- * 07-26-2000 SJH Removed hard coded buad rate
+ * 07-26-2000 SJH Removed hard coded baud rate
*/
#include <asm/hardware.h>
diff --git a/include/asm-arm/arch-pxa/ohci.h b/include/asm-arm/arch-pxa/ohci.h
index 7da89569061e..e848a47128cd 100644
--- a/include/asm-arm/arch-pxa/ohci.h
+++ b/include/asm-arm/arch-pxa/ohci.h
@@ -11,6 +11,8 @@ struct pxaohci_platform_data {
#define PMM_NPS_MODE 1
#define PMM_GLOBAL_MODE 2
#define PMM_PERPORT_MODE 3
+
+ int power_budget;
};
extern void pxa_set_ohci_info(struct pxaohci_platform_data *info);
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 95b3abf4851b..7c9568d30307 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -127,6 +127,12 @@ static inline int cpu_is_xsc3(void)
}
#endif
+#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
+#define cpu_is_xscale() 0
+#else
+#define cpu_is_xscale() 1
+#endif
+
#define set_cr(x) \
__asm__ __volatile__( \
"mcr p15, 0, %0, c1, c0, 0 @ set CR" \
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 358e4d309ceb..c2059a3a0621 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -159,17 +159,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
#define lazy_mmu_prot_update(pte) do { } while (0)
#endif
-#ifndef __HAVE_ARCH_MULTIPLE_ZERO_PAGE
+#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr) (pte)
-#else
-#define move_pte(pte, prot, old_addr, new_addr) \
-({ \
- pte_t newpte = (pte); \
- if (pte_present(pte) && pfn_valid(pte_pfn(pte)) && \
- pte_page(pte) == ZERO_PAGE(old_addr)) \
- newpte = mk_pte(ZERO_PAGE(new_addr), (prot)); \
- newpte; \
-})
#endif
/*
diff --git a/include/asm-mips/addrspace.h b/include/asm-mips/addrspace.h
index 42520cc84b0f..1386af1cb7d9 100644
--- a/include/asm-mips/addrspace.h
+++ b/include/asm-mips/addrspace.h
@@ -129,6 +129,7 @@
#if defined (CONFIG_CPU_R4300) \
|| defined (CONFIG_CPU_R4X00) \
|| defined (CONFIG_CPU_R5000) \
+ || defined (CONFIG_CPU_RM7000) \
|| defined (CONFIG_CPU_NEVADA) \
|| defined (CONFIG_CPU_TX49XX) \
|| defined (CONFIG_CPU_MIPS64)
diff --git a/include/asm-mips/cpu.h b/include/asm-mips/cpu.h
index 818b9a97e214..dff2a0a52f8f 100644
--- a/include/asm-mips/cpu.h
+++ b/include/asm-mips/cpu.h
@@ -51,6 +51,7 @@
#define PRID_IMP_R4300 0x0b00
#define PRID_IMP_VR41XX 0x0c00
#define PRID_IMP_R12000 0x0e00
+#define PRID_IMP_R14000 0x0f00
#define PRID_IMP_R8000 0x1000
#define PRID_IMP_PR4450 0x1200
#define PRID_IMP_R4600 0x2000
@@ -87,6 +88,7 @@
#define PRID_IMP_24K 0x9300
#define PRID_IMP_34K 0x9500
#define PRID_IMP_24KE 0x9600
+#define PRID_IMP_74K 0x9700
/*
* These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -196,7 +198,9 @@
#define CPU_34K 60
#define CPU_PR4450 61
#define CPU_SB1A 62
-#define CPU_LAST 62
+#define CPU_74K 63
+#define CPU_R14000 64
+#define CPU_LAST 64
/*
* ISA Level encodings
diff --git a/include/asm-mips/delay.h b/include/asm-mips/delay.h
index 64dd45150f64..928f30f8c45c 100644
--- a/include/asm-mips/delay.h
+++ b/include/asm-mips/delay.h
@@ -19,20 +19,22 @@ static inline void __delay(unsigned long loops)
{
if (sizeof(long) == 4)
__asm__ __volatile__ (
- ".set\tnoreorder\n"
- "1:\tbnez\t%0,1b\n\t"
- "subu\t%0,1\n\t"
- ".set\treorder"
+ " .set noreorder \n"
+ " .align 3 \n"
+ "1: bnez %0, 1b \n"
+ " subu %0, 1 \n"
+ " .set reorder \n"
: "=r" (loops)
: "0" (loops));
else if (sizeof(long) == 8)
__asm__ __volatile__ (
- ".set\tnoreorder\n"
- "1:\tbnez\t%0,1b\n\t"
- "dsubu\t%0,1\n\t"
- ".set\treorder"
- :"=r" (loops)
- :"0" (loops));
+ " .set noreorder \n"
+ " .align 3 \n"
+ "1: bnez %0, 1b \n"
+ " dsubu %0, 1 \n"
+ " .set reorder \n"
+ : "=r" (loops)
+ : "0" (loops));
}
diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h
index a554089991f2..12d118f1bc9c 100644
--- a/include/asm-mips/futex.h
+++ b/include/asm-mips/futex.h
@@ -7,6 +7,7 @@
#include <linux/futex.h>
#include <asm/errno.h>
#include <asm/uaccess.h>
+#include <asm/war.h>
#ifdef CONFIG_SMP
#define __FUTEX_SMP_SYNC " sync \n"
@@ -16,30 +17,58 @@
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
{ \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " .set mips3 \n" \
- "1: ll %1, (%3) # __futex_atomic_op1 \n" \
- " .set mips0 \n" \
- " " insn " \n" \
- " .set mips3 \n" \
- "2: sc $1, (%3) \n" \
- " beqzl $1, 1b \n" \
- __FUTEX_SMP_SYNC \
- "3: \n" \
- " .set pop \n" \
- " .set mips0 \n" \
- " .section .fixup,\"ax\" \n" \
- "4: li %0, %5 \n" \
- " j 2b \n" \
- " .previous \n" \
- " .section __ex_table,\"a\" \n" \
- " "__UA_ADDR "\t1b, 4b \n" \
- " "__UA_ADDR "\t2b, 4b \n" \
- " .previous \n" \
- : "=r" (ret), "=r" (oldval) \
- : "0" (0), "r" (uaddr), "Jr" (oparg), "i" (-EFAULT)); \
+ if (cpu_has_llsc && R10000_LLSC_WAR) { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set mips3 \n" \
+ "1: ll %1, (%3) # __futex_atomic_op \n" \
+ " .set mips0 \n" \
+ " " insn " \n" \
+ " .set mips3 \n" \
+ "2: sc $1, (%3) \n" \
+ " beqzl $1, 1b \n" \
+ __FUTEX_SMP_SYNC \
+ "3: \n" \
+ " .set pop \n" \
+ " .set mips0 \n" \
+ " .section .fixup,\"ax\" \n" \
+ "4: li %0, %5 \n" \
+ " j 2b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "__UA_ADDR "\t1b, 4b \n" \
+ " "__UA_ADDR "\t2b, 4b \n" \
+ " .previous \n" \
+ : "=r" (ret), "=r" (oldval) \
+ : "0" (0), "r" (uaddr), "Jr" (oparg), "i" (-EFAULT)); \
+ } else if (cpu_has_llsc) { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set mips3 \n" \
+ "1: ll %1, (%3) # __futex_atomic_op \n" \
+ " .set mips0 \n" \
+ " " insn " \n" \
+ " .set mips3 \n" \
+ "2: sc $1, (%3) \n" \
+ " beqz $1, 1b \n" \
+ __FUTEX_SMP_SYNC \
+ "3: \n" \
+ " .set pop \n" \
+ " .set mips0 \n" \
+ " .section .fixup,\"ax\" \n" \
+ "4: li %0, %5 \n" \
+ " j 2b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "__UA_ADDR "\t1b, 4b \n" \
+ " "__UA_ADDR "\t2b, 4b \n" \
+ " .previous \n" \
+ : "=r" (ret), "=r" (oldval) \
+ : "0" (0), "r" (uaddr), "Jr" (oparg), "i" (-EFAULT)); \
+ } else \
+ ret = -ENOSYS; \
}
static inline int
@@ -102,7 +131,69 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
- return -ENOSYS;
+ int retval;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ __asm__ __volatile__(
+ "# futex_atomic_cmpxchg_inatomic \n"
+ " .set push \n"
+ " .set noat \n"
+ " .set mips3 \n"
+ "1: ll %0, %2 \n"
+ " bne %0, %z3, 3f \n"
+ " .set mips0 \n"
+ " move $1, %z4 \n"
+ " .set mips3 \n"
+ "2: sc $1, %1 \n"
+ " beqzl $1, 1b \n"
+ __FUTEX_SMP_SYNC
+ "3: \n"
+ " .set pop \n"
+ " .section .fixup,\"ax\" \n"
+ "4: li %0, %5 \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " "__UA_ADDR "\t1b, 4b \n"
+ " "__UA_ADDR "\t2b, 4b \n"
+ " .previous \n"
+ : "=&r" (retval), "=R" (*uaddr)
+ : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ __asm__ __volatile__(
+ "# futex_atomic_cmpxchg_inatomic \n"
+ " .set push \n"
+ " .set noat \n"
+ " .set mips3 \n"
+ "1: ll %0, %2 \n"
+ " bne %0, %z3, 3f \n"
+ " .set mips0 \n"
+ " move $1, %z4 \n"
+ " .set mips3 \n"
+ "2: sc $1, %1 \n"
+ " beqz $1, 1b \n"
+ __FUTEX_SMP_SYNC
+ "3: \n"
+ " .set pop \n"
+ " .section .fixup,\"ax\" \n"
+ "4: li %0, %5 \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " "__UA_ADDR "\t1b, 4b \n"
+ " "__UA_ADDR "\t2b, 4b \n"
+ " .previous \n"
+ : "=&r" (retval), "=R" (*uaddr)
+ : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
+ : "memory");
+ } else
+ return -ENOSYS;
+
+ return retval;
}
#endif
diff --git a/include/asm-mips/inst.h b/include/asm-mips/inst.h
index e0745f4ff624..1ed8d0f62577 100644
--- a/include/asm-mips/inst.h
+++ b/include/asm-mips/inst.h
@@ -6,6 +6,7 @@
* for more details.
*
* Copyright (C) 1996, 2000 by Ralf Baechle
+ * Copyright (C) 2006 by Thiemo Seufer
*/
#ifndef _ASM_INST_H
#define _ASM_INST_H
@@ -21,14 +22,14 @@ enum major_op {
cop0_op, cop1_op, cop2_op, cop1x_op,
beql_op, bnel_op, blezl_op, bgtzl_op,
daddi_op, daddiu_op, ldl_op, ldr_op,
- major_1c_op, jalx_op, major_1e_op, major_1f_op,
+ spec2_op, jalx_op, mdmx_op, spec3_op,
lb_op, lh_op, lwl_op, lw_op,
lbu_op, lhu_op, lwr_op, lwu_op,
sb_op, sh_op, swl_op, sw_op,
sdl_op, sdr_op, swr_op, cache_op,
ll_op, lwc1_op, lwc2_op, pref_op,
lld_op, ldc1_op, ldc2_op, ld_op,
- sc_op, swc1_op, swc2_op, rdhwr_op,
+ sc_op, swc1_op, swc2_op, major_3b_op,
scd_op, sdc1_op, sdc2_op, sd_op
};
@@ -37,7 +38,7 @@ enum major_op {
*/
enum spec_op {
sll_op, movc_op, srl_op, sra_op,
- sllv_op, srlv_op, srav_op, spec1_unused_op, /* Opcode 0x07 is unused */
+ sllv_op, pmon_op, srlv_op, srav_op,
jr_op, jalr_op, movz_op, movn_op,
syscall_op, break_op, spim_op, sync_op,
mfhi_op, mthi_op, mflo_op, mtlo_op,
@@ -55,6 +56,28 @@ enum spec_op {
};
/*
+ * func field of spec2 opcode.
+ */
+enum spec2_op {
+ madd_op, maddu_op, mul_op, spec2_3_unused_op,
+ msub_op, msubu_op, /* more unused ops */
+ clz_op = 0x20, clo_op,
+ dclz_op = 0x24, dclo_op,
+ sdbpp_op = 0x3f
+};
+
+/*
+ * func field of spec3 opcode.
+ */
+enum spec3_op {
+ ext_op, dextm_op, dextu_op, dext_op,
+ ins_op, dinsm_op, dinsu_op, dins_op,
+ bshfl_op = 0x20,
+ dbshfl_op = 0x24,
+ rdhwr_op = 0x3f
+};
+
+/*
* rt field of bcond opcodes.
*/
enum rt_op {
@@ -151,8 +174,8 @@ enum cop1x_func {
* func field for mad opcodes (MIPS IV).
*/
enum mad_func {
- madd_op = 0x08, msub_op = 0x0a,
- nmadd_op = 0x0c, nmsub_op = 0x0e
+ madd_fp_op = 0x08, msub_fp_op = 0x0a,
+ nmadd_fp_op = 0x0c, nmsub_fp_op = 0x0e
};
/*
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h
index a2ef579f6b1a..5af7517fce8a 100644
--- a/include/asm-mips/mipsregs.h
+++ b/include/asm-mips/mipsregs.h
@@ -291,7 +291,7 @@
#define ST0_DL (_ULCAST_(1) << 24)
/*
- * Enable the MIPS DSP ASE
+ * Enable the MIPS MDMX and DSP ASEs
*/
#define ST0_MX 0x01000000
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index a1eab136ff6c..4035ec79ecd4 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -139,9 +139,11 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+#ifndef CONFIG_SPARSEMEM
#ifndef CONFIG_NEED_MULTIPLE_NODES
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif
+#endif
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h
index 4d6bc45df594..087c20769256 100644
--- a/include/asm-mips/pgtable-32.h
+++ b/include/asm-mips/pgtable-32.h
@@ -177,48 +177,67 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
/*
- * Bits 0, 1, 2, 9 and 10 are taken, split up the 27 bits of offset
- * into this range:
+ * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range:
*/
-#define PTE_FILE_MAX_BITS 27
+#define PTE_FILE_MAX_BITS 28
-#define pte_to_pgoff(_pte) \
- ((((_pte).pte >> 3) & 0x3f ) + (((_pte).pte >> 11) << 8 ))
+#define pte_to_pgoff(_pte) ((((_pte).pte >> 1 ) & 0x07) | \
+ (((_pte).pte >> 2 ) & 0x38) | \
+ (((_pte).pte >> 10) << 6 ))
-#define pgoff_to_pte(off) \
- ((pte_t) { (((off) & 0x3f) << 3) + (((off) >> 8) << 11) + _PAGE_FILE })
+#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x07) << 1 ) | \
+ (((off) & 0x38) << 2 ) | \
+ (((off) >> 6 ) << 10) | \
+ _PAGE_FILE })
#else
/* Swap entries must have VALID and GLOBAL bits cleared. */
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#define __swp_type(x) (((x).val >> 2) & 0x1f)
+#define __swp_offset(x) ((x).val >> 7)
+#define __swp_entry(type,offset) \
+ ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
+#else
#define __swp_type(x) (((x).val >> 8) & 0x1f)
-#define __swp_offset(x) ((x).val >> 13)
+#define __swp_offset(x) ((x).val >> 13)
#define __swp_entry(type,offset) \
- ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
+ ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
+#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
/*
- * Bits 0, 1, 2, 7 and 8 are taken, split up the 27 bits of offset
- * into this range:
+ * Bits 0 and 1 of pte_high are taken, use the rest for the page offset...
*/
-#define PTE_FILE_MAX_BITS 27
+#define PTE_FILE_MAX_BITS 30
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
- /* fixme */
-#define pte_to_pgoff(_pte) (((_pte).pte_high >> 6) + ((_pte).pte_high & 0x3f))
-#define pgoff_to_pte(off) \
- ((pte_t){(((off) & 0x3f) + ((off) << 6) + _PAGE_FILE)})
+#define pte_to_pgoff(_pte) ((_pte).pte_high >> 2)
+#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) << 2 })
#else
-#define pte_to_pgoff(_pte) \
- ((((_pte).pte >> 3) & 0x1f ) + (((_pte).pte >> 9) << 6 ))
+/*
+ * Bits 0, 4, 6, and 7 are taken, split up 28 bits of offset into this range:
+ */
+#define PTE_FILE_MAX_BITS 28
+
+#define pte_to_pgoff(_pte) ((((_pte).pte >> 1) & 0x7) | \
+ (((_pte).pte >> 2) & 0x8) | \
+ (((_pte).pte >> 8) << 4))
-#define pgoff_to_pte(off) \
- ((pte_t) { (((off) & 0x1f) << 3) + (((off) >> 6) << 9) + _PAGE_FILE })
+#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x7) << 1) | \
+ (((off) & 0x8) << 2) | \
+ (((off) >> 4) << 8) | \
+ _PAGE_FILE })
#endif
#endif
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
+#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
+#else
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+#endif
#endif /* _ASM_PGTABLE_32_H */
diff --git a/include/asm-mips/pgtable-64.h b/include/asm-mips/pgtable-64.h
index 82166b254b27..2faf5c9ff127 100644
--- a/include/asm-mips/pgtable-64.h
+++ b/include/asm-mips/pgtable-64.h
@@ -224,15 +224,12 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
/*
- * Bits 0, 1, 2, 7 and 8 are taken, split up the 32 bits of offset
- * into this range:
+ * Bits 0, 4, 6, and 7 are taken. Let's leave bits 1, 2, 3, and 5 alone to
+ * make things easier, and only use the upper 56 bits for the page offset...
*/
-#define PTE_FILE_MAX_BITS 32
+#define PTE_FILE_MAX_BITS 56
-#define pte_to_pgoff(_pte) \
- ((((_pte).pte >> 3) & 0x1f ) + (((_pte).pte >> 9) << 6 ))
-
-#define pgoff_to_pte(off) \
- ((pte_t) { (((off) & 0x1f) << 3) + (((off) >> 6) << 9) + _PAGE_FILE })
+#define pte_to_pgoff(_pte) ((_pte).pte >> 8)
+#define pgoff_to_pte(off) ((pte_t) { ((off) << 8) | _PAGE_FILE })
#endif /* _ASM_PGTABLE_64_H */
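
The pgtable-32.h and pgtable-64.h hunks above re-pack a nonlinear-file offset around the PTE bits that stay reserved for flags, which is why PTE_FILE_MAX_BITS changes with each layout. The technique itself is only splitting an integer across the free bit ranges and reassembling it; here is a tiny self-checking example with arbitrarily chosen reserved bits (0 and 4), not the actual MIPS layout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Bits 0 and 4 are reserved flags; pack a value into the remaining bits:
 * the 3 low bits go to bits 1-3, the rest starts at bit 5. */
static uint64_t pack(uint64_t off)
{
    return ((off & 0x7) << 1) | ((off >> 3) << 5);
}

static uint64_t unpack(uint64_t word)
{
    return ((word >> 1) & 0x7) | ((word >> 5) << 3);
}

int main(void)
{
    for (uint64_t off = 0; off < 100000; off++)
        assert(unpack(pack(off)) == off);
    assert((pack(12345) & ((1u << 0) | (1u << 4))) == 0); /* reserved bits stay clear */
    printf("ok\n");
    return 0;
}
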
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index 702a28fa7a34..d0af2a3b0152 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -70,7 +70,15 @@ extern unsigned long zero_page_mask;
#define ZERO_PAGE(vaddr) \
(virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
-#define __HAVE_ARCH_MULTIPLE_ZERO_PAGE
+#define __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr) \
+({ \
+ pte_t newpte = (pte); \
+ if (pte_present(pte) && pfn_valid(pte_pfn(pte)) && \
+ pte_page(pte) == ZERO_PAGE(old_addr)) \
+ newpte = mk_pte(ZERO_PAGE(new_addr), (prot)); \
+ newpte; \
+})
extern void paging_init(void);
@@ -82,10 +90,11 @@ extern void paging_init(void);
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pmd_page_kernel(pmd) pmd_val(pmd)
-#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
-#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
-
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
+
+#define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
+#define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT)
+
static inline void set_pte(pte_t *ptep, pte_t pte)
{
ptep->pte_high = pte.pte_high;
@@ -93,27 +102,35 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
ptep->pte_low = pte.pte_low;
//printk("pte_high %x pte_low %x\n", ptep->pte_high, ptep->pte_low);
- if (pte_val(pte) & _PAGE_GLOBAL) {
+ if (pte.pte_low & _PAGE_GLOBAL) {
pte_t *buddy = ptep_buddy(ptep);
/*
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
- if (pte_none(*buddy))
- buddy->pte_low |= _PAGE_GLOBAL;
+ if (pte_none(*buddy)) {
+ buddy->pte_low |= _PAGE_GLOBAL;
+ buddy->pte_high |= _PAGE_GLOBAL;
+ }
}
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
+ pte_t null = __pte(0);
+
/* Preserve global status for the pair */
- if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
- set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
- else
- set_pte_at(mm, addr, ptep, __pte(0));
+ if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
+ null.pte_low = null.pte_high = _PAGE_GLOBAL;
+
+ set_pte_at(mm, addr, ptep, null);
}
#else
+
+#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
+#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
+
/*
* Certain architectures need to do special things when pte's
* within a page table are directly modified. Thus, the following
@@ -174,75 +191,76 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
*/
static inline int pte_user(pte_t pte) { BUG(); return 0; }
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
-static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_READ; }
-static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_WRITE; }
-static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_MODIFIED; }
-static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; }
+static inline int pte_read(pte_t pte) { return pte.pte_low & _PAGE_READ; }
+static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; }
+static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; }
+static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte) { return pte.pte_low & _PAGE_FILE; }
+
static inline pte_t pte_wrprotect(pte_t pte)
{
- (pte).pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
- (pte).pte_high &= ~_PAGE_SILENT_WRITE;
+ pte.pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
+ pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
}
static inline pte_t pte_rdprotect(pte_t pte)
{
- (pte).pte_low &= ~(_PAGE_READ | _PAGE_SILENT_READ);
- (pte).pte_high &= ~_PAGE_SILENT_READ;
+ pte.pte_low &= ~(_PAGE_READ | _PAGE_SILENT_READ);
+ pte.pte_high &= ~_PAGE_SILENT_READ;
return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
- (pte).pte_low &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE);
- (pte).pte_high &= ~_PAGE_SILENT_WRITE;
+ pte.pte_low &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
+ pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
- (pte).pte_low &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
- (pte).pte_high &= ~_PAGE_SILENT_READ;
+ pte.pte_low &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
+ pte.pte_high &= ~_PAGE_SILENT_READ;
return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
- (pte).pte_low |= _PAGE_WRITE;
- if ((pte).pte_low & _PAGE_MODIFIED) {
- (pte).pte_low |= _PAGE_SILENT_WRITE;
- (pte).pte_high |= _PAGE_SILENT_WRITE;
+ pte.pte_low |= _PAGE_WRITE;
+ if (pte.pte_low & _PAGE_MODIFIED) {
+ pte.pte_low |= _PAGE_SILENT_WRITE;
+ pte.pte_high |= _PAGE_SILENT_WRITE;
}
return pte;
}
static inline pte_t pte_mkread(pte_t pte)
{
- (pte).pte_low |= _PAGE_READ;
- if ((pte).pte_low & _PAGE_ACCESSED) {
- (pte).pte_low |= _PAGE_SILENT_READ;
- (pte).pte_high |= _PAGE_SILENT_READ;
+ pte.pte_low |= _PAGE_READ;
+ if (pte.pte_low & _PAGE_ACCESSED) {
+ pte.pte_low |= _PAGE_SILENT_READ;
+ pte.pte_high |= _PAGE_SILENT_READ;
}
return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
- (pte).pte_low |= _PAGE_MODIFIED;
- if ((pte).pte_low & _PAGE_WRITE) {
- (pte).pte_low |= _PAGE_SILENT_WRITE;
- (pte).pte_high |= _PAGE_SILENT_WRITE;
+ pte.pte_low |= _PAGE_MODIFIED;
+ if (pte.pte_low & _PAGE_WRITE) {
+ pte.pte_low |= _PAGE_SILENT_WRITE;
+ pte.pte_high |= _PAGE_SILENT_WRITE;
}
return pte;
}
static inline pte_t pte_mkyoung(pte_t pte)
{
- (pte).pte_low |= _PAGE_ACCESSED;
- if ((pte).pte_low & _PAGE_READ)
- (pte).pte_low |= _PAGE_SILENT_READ;
- (pte).pte_high |= _PAGE_SILENT_READ;
+ pte.pte_low |= _PAGE_ACCESSED;
+ if (pte.pte_low & _PAGE_READ)
+ pte.pte_low |= _PAGE_SILENT_READ;
+ pte.pte_high |= _PAGE_SILENT_READ;
return pte;
}
#else
@@ -335,8 +353,9 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- pte.pte_low &= _PAGE_CHG_MASK;
- pte.pte_low |= pgprot_val(newprot);
+ pte.pte_low &= _PAGE_CHG_MASK;
+ pte.pte_high &= ~0x3f;
+ pte.pte_low |= pgprot_val(newprot);
pte.pte_high |= pgprot_val(newprot) & 0x3f;
return pte;
}
diff --git a/include/asm-mips/sigcontext.h b/include/asm-mips/sigcontext.h
index 8edabb0be23f..cefa657dd04a 100644
--- a/include/asm-mips/sigcontext.h
+++ b/include/asm-mips/sigcontext.h
@@ -55,8 +55,14 @@ struct sigcontext {
struct sigcontext {
unsigned long sc_regs[32];
unsigned long sc_fpregs[32];
- unsigned long sc_hi[4];
- unsigned long sc_lo[4];
+ unsigned long sc_mdhi;
+ unsigned long sc_hi1;
+ unsigned long sc_hi2;
+ unsigned long sc_hi3;
+ unsigned long sc_mdlo;
+ unsigned long sc_lo1;
+ unsigned long sc_lo2;
+ unsigned long sc_lo3;
unsigned long sc_pc;
unsigned int sc_fpc_csr;
unsigned int sc_used_math;
diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h
index 75c6fe7c2126..e14e4b69de21 100644
--- a/include/asm-mips/smp.h
+++ b/include/asm-mips/smp.h
@@ -48,7 +48,6 @@ extern struct call_data_struct *call_data;
#define SMP_CALL_FUNCTION 0x2
extern cpumask_t phys_cpu_present_map;
-extern cpumask_t cpu_online_map;
#define cpu_possible_map phys_cpu_present_map
extern cpumask_t cpu_callout_map;
@@ -86,9 +85,9 @@ extern void prom_init_secondary(void);
extern void plat_smp_setup(void);
/*
- * Called after init_IRQ but before __cpu_up.
+ * Called in smp_prepare_cpus.
*/
-extern void prom_prepare_cpus(unsigned int max_cpus);
+extern void plat_prepare_cpus(unsigned int max_cpus);
/*
* Last chance for the board code to finish SMP initialization before
diff --git a/include/asm-mips/sparsemem.h b/include/asm-mips/sparsemem.h
new file mode 100644
index 000000000000..795ac6c23203
--- /dev/null
+++ b/include/asm-mips/sparsemem.h
@@ -0,0 +1,14 @@
+#ifndef _MIPS_SPARSEMEM_H
+#define _MIPS_SPARSEMEM_H
+#ifdef CONFIG_SPARSEMEM
+
+/*
+ * SECTION_SIZE_BITS 2^N: how big each section will be
+ * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
+ */
+#define SECTION_SIZE_BITS 28
+#define MAX_PHYSMEM_BITS 35
+
+#endif /* CONFIG_SPARSEMEM */
+#endif /* _MIPS_SPARSEMEM_H */
+
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 9fcf0162d859..f6265c2a0dd2 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -329,7 +329,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTRS_CELL (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
- CPU_FTR_CTRL | CPU_FTR_PAUSE_ZERO)
+ CPU_FTR_CTRL | CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE)
#define CPU_FTRS_COMPATIBLE (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)
#endif
diff --git a/include/asm-s390/futex.h b/include/asm-s390/futex.h
index 40c25e166a9b..1802775568b9 100644
--- a/include/asm-s390/futex.h
+++ b/include/asm-s390/futex.h
@@ -11,23 +11,24 @@
#define __futex_atomic_fixup \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
- " .long 0b,2b,1b,2b\n" \
+ " .long 0b,4b,2b,4b,3b,4b\n" \
".previous"
#else /* __s390x__ */
#define __futex_atomic_fixup \
".section __ex_table,\"a\"\n" \
" .align 8\n" \
- " .quad 0b,2b,1b,2b\n" \
+ " .quad 0b,4b,2b,4b,3b,4b\n" \
".previous"
#endif /* __s390x__ */
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
- asm volatile(" l %1,0(%6)\n" \
- "0: " insn \
- " cs %1,%2,0(%6)\n" \
- "1: jl 0b\n" \
+ asm volatile(" sacf 256\n" \
+ "0: l %1,0(%6)\n" \
+ "1: " insn \
+ "2: cs %1,%2,0(%6)\n" \
+ "3: jl 1b\n" \
" lhi %0,0\n" \
- "2:\n" \
+ "4: sacf 0\n" \
__futex_atomic_fixup \
: "=d" (ret), "=&d" (oldval), "=&d" (newval), \
"=m" (*uaddr) \
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index db0606c1abd4..bea727904287 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -98,8 +98,8 @@
#define __LC_KERNEL_ASCE 0xD58
#define __LC_USER_ASCE 0xD60
#define __LC_PANIC_STACK 0xD68
-#define __LC_CPUID 0xD90
-#define __LC_CPUADDR 0xD98
+#define __LC_CPUID 0xD80
+#define __LC_CPUADDR 0xD88
#define __LC_IPLDEV 0xDB8
#define __LC_JIFFY_TIMER 0xDC0
#define __LC_CURRENT 0xDD8
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index c44e7466534e..cd464f469a2c 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -689,6 +689,23 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p
#define pte_clear(mm,addr,ptep) \
set_pte_at((mm), (addr), (ptep), __pte(0UL))
+#ifdef DCACHE_ALIASING_POSSIBLE
+#define __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr) \
+({ \
+ pte_t newpte = (pte); \
+ if (tlb_type != hypervisor && pte_present(pte)) { \
+ unsigned long this_pfn = pte_pfn(pte); \
+ \
+ if (pfn_valid(this_pfn) && \
+ (((old_addr) ^ (new_addr)) & (1 << 13))) \
+ flush_dcache_page_all(current->mm, \
+ pfn_to_page(this_pfn)); \
+ } \
+ newpte; \
+})
+#endif
+
extern pgd_t swapper_pg_dir[2048];
extern pmd_t swapper_low_pmd_dir[2048];
diff --git a/include/asm-um/irqflags.h b/include/asm-um/irqflags.h
new file mode 100644
index 000000000000..659b9abdfdba
--- /dev/null
+++ b/include/asm-um/irqflags.h
@@ -0,0 +1,6 @@
+#ifndef __UM_IRQFLAGS_H
+#define __UM_IRQFLAGS_H
+
+/* Empty for now */
+
+#endif
diff --git a/include/asm-um/uaccess.h b/include/asm-um/uaccess.h
index bea5a015f667..16c734af9193 100644
--- a/include/asm-um/uaccess.h
+++ b/include/asm-um/uaccess.h
@@ -41,11 +41,11 @@
#define __get_user(x, ptr) \
({ \
- const __typeof__(ptr) __private_ptr = ptr; \
+ const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
__typeof__(x) __private_val; \
int __private_ret = -EFAULT; \
(x) = (__typeof__(*(__private_ptr)))0; \
- if (__copy_from_user((void *) &__private_val, (__private_ptr), \
+ if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
sizeof(*(__private_ptr))) == 0) { \
(x) = (__typeof__(*(__private_ptr))) __private_val; \
__private_ret = 0; \
@@ -62,7 +62,7 @@
#define __put_user(x, ptr) \
({ \
- __typeof__(ptr) __private_ptr = ptr; \
+ __typeof__(*(ptr)) __user *__private_ptr = ptr; \
__typeof__(*(__private_ptr)) __private_val; \
int __private_ret = -EFAULT; \
__private_val = (__typeof__(*(__private_ptr))) (x); \
diff --git a/include/asm-x86_64/elf.h b/include/asm-x86_64/elf.h
index c98633af07d2..b4f8f4a41a6e 100644
--- a/include/asm-x86_64/elf.h
+++ b/include/asm-x86_64/elf.h
@@ -159,7 +159,7 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
/* 1GB for 64bit, 8MB for 32bit */
-#define STACK_RND_MASK (is_compat_task() ? 0x7ff : 0x3fffff)
+#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
#endif
diff --git a/include/linux/console.h b/include/linux/console.h
index 721371382ae5..08734e660d41 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -117,6 +117,10 @@ extern void console_stop(struct console *);
extern void console_start(struct console *);
extern int is_console_locked(void);
+/* Suspend and resume console messages over PM events */
+extern void suspend_console(void);
+extern void resume_console(void);
+
/* Some debug stub to catch some of the obvious races in the VT code */
#if 1
#define WARN_CONSOLE_UNLOCKED() WARN_ON(!is_console_locked() && !oops_in_progress)
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index ad133fcfb239..1713ace808bf 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -21,7 +21,7 @@ typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
-typedef int (elevator_init_fn) (request_queue_t *, elevator_t *);
+typedef void *(elevator_init_fn) (request_queue_t *, elevator_t *);
typedef void (elevator_exit_fn) (elevator_t *);
struct elevator_ops
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index dd7d627bf66f..c115e9e840b4 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -1114,8 +1114,11 @@ static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c)
mmsg->mfa = readl(c->in_port);
if (unlikely(mmsg->mfa >= c->in_queue.len)) {
+ u32 mfa = mmsg->mfa;
+
mempool_free(mmsg, c->in_msg.mempool);
- if(mmsg->mfa == I2O_QUEUE_EMPTY)
+
+ if (mfa == I2O_QUEUE_EMPTY)
return ERR_PTR(-EBUSY);
return ERR_PTR(-EFAULT);
}
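
The i2o_msg_get() hunk above copies mmsg->mfa into a local before the message goes back to the mempool, so the freed structure is never read afterwards. The same save-before-free shape in a minimal standalone form, with hypothetical names:

#include <stdint.h>
#include <stdlib.h>

struct msg { uint32_t mfa; };

#define QUEUE_EMPTY 0xffffffffu

static int classify_and_free(struct msg *m)
{
    uint32_t mfa = m->mfa;    /* read everything needed first... */

    free(m);                  /* ...then release the object */

    /* From here on only the saved copy is used; touching *m again
     * would be a use-after-free. */
    return mfa == QUEUE_EMPTY ? -1 : 0;
}

int main(void)
{
    struct msg *m = malloc(sizeof(*m));

    if (!m)
        return 1;
    m->mfa = QUEUE_EMPTY;
    return classify_and_free(m) == -1 ? 0 : 1;
}
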
diff --git a/include/linux/m48t86.h b/include/linux/m48t86.h
index 9065199319d0..915d6b4f0f89 100644
--- a/include/linux/m48t86.h
+++ b/include/linux/m48t86.h
@@ -11,6 +11,6 @@
struct m48t86_ops
{
- void (*writeb)(unsigned char value, unsigned long addr);
- unsigned char (*readb)(unsigned long addr);
+ void (*writebyte)(unsigned char value, unsigned long addr);
+ unsigned char (*readbyte)(unsigned long addr);
};
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 6a7621b2b12b..f5fdca1d67e6 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -36,6 +36,7 @@
#include <linux/nodemask.h>
struct vm_area_struct;
+struct mm_struct;
#ifdef CONFIG_NUMA
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 36740354d4db..2d8337150493 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -15,6 +15,7 @@
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <asm/atomic.h>
+#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 4877e35ae202..936ef82ed76a 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -50,7 +50,7 @@
extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags);
extern acpi_status pci_osc_support_set(u32 flags);
#else
-#if !defined(acpi_status)
+#if !defined(AE_ERROR)
typedef u32 acpi_status;
#define AE_ERROR (acpi_status) (0x0001)
#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 3a6a4e37a482..6fd36cb09160 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -442,6 +442,7 @@ struct pci_dev *pci_find_device_reverse (unsigned int vendor, unsigned int devic
struct pci_dev *pci_find_slot (unsigned int bus, unsigned int devfn);
int pci_find_capability (struct pci_dev *dev, int cap);
int pci_find_next_capability (struct pci_dev *dev, u8 pos, int cap);
+int pci_find_ext_capability (struct pci_dev *dev, int cap);
struct pci_bus * pci_find_next_bus(const struct pci_bus *from);
struct pci_dev *pci_get_device (unsigned int vendor, unsigned int device, struct pci_dev *from);
@@ -662,6 +663,7 @@ static inline int pci_register_driver(struct pci_driver *drv) { return 0;}
static inline void pci_unregister_driver(struct pci_driver *drv) { }
static inline int pci_find_capability (struct pci_dev *dev, int cap) {return 0; }
static inline int pci_find_next_capability (struct pci_dev *dev, u8 post, int cap) { return 0; }
+static inline int pci_find_ext_capability (struct pci_dev *dev, int cap) {return 0; }
static inline const struct pci_device_id *pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev) { return NULL; }
/* Power management related routines */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 590dc6dca315..c3fe769c9129 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -935,6 +935,7 @@
#define PCI_DEVICE_ID_PLX_DJINN_ITOO 0x1151
#define PCI_DEVICE_ID_PLX_R753 0x1152
#define PCI_DEVICE_ID_PLX_OLITEC 0x1187
+#define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196
#define PCI_DEVICE_ID_PLX_9050 0x9050
#define PCI_DEVICE_ID_PLX_9080 0x9080
#define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001
@@ -1182,6 +1183,14 @@
#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E
#define PCI_DEVICE_ID_NVIDIA_NVENET_14 0x0372
#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373
+#define PCI_DEVICE_ID_NVIDIA_NVENET_16 0x03E5
+#define PCI_DEVICE_ID_NVIDIA_NVENET_17 0x03E6
+#define PCI_DEVICE_ID_NVIDIA_NVENET_18 0x03EE
+#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF
+#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450
+#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451
+#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452
+#define PCI_DEVICE_ID_NVIDIA_NVENET_23 0x0453
#define PCI_VENDOR_ID_IMS 0x10e0
#define PCI_DEVICE_ID_IMS_TT128 0x9128
@@ -1827,6 +1836,7 @@
#define PCI_VENDOR_ID_SAMSUNG 0x144d
+#define PCI_VENDOR_ID_MYRICOM 0x14c1
#define PCI_VENDOR_ID_TITAN 0x14D2
#define PCI_DEVICE_ID_TITAN_010L 0x8001
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 530ae3f4248c..fab5aed8ca31 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -73,11 +73,6 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc);
int vt_waitactive(int vt);
void change_console(struct vc_data *new_vc);
void reset_vc(struct vc_data *vc);
-#ifdef CONFIG_VT
-int is_console_suspend_safe(void);
-#else
-static inline int is_console_suspend_safe(void) { return 1; }
-#endif
/*
* vc_screen.c shares this temporary buffer with the console write code so that
diff --git a/include/net/compat.h b/include/net/compat.h
index 8662b8f43df5..e65cbedb6abc 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -3,6 +3,8 @@
#include <linux/config.h>
+struct sock;
+
#if defined(CONFIG_COMPAT)
#include <linux/compat.h>
@@ -23,7 +25,6 @@ struct compat_cmsghdr {
compat_int_t cmsg_type;
};
-struct sock;
extern int compat_sock_get_timestamp(struct sock *, struct timeval __user *);
#else /* defined(CONFIG_COMPAT) */
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
index d5926bfb1fc9..d5147770ad47 100644
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -29,7 +29,7 @@
#include <linux/kernel.h> /* ARRAY_SIZE */
#include <linux/wireless.h>
-#define IEEE80211_VERSION "git-1.1.7"
+#define IEEE80211_VERSION "git-1.1.13"
#define IEEE80211_DATA_LEN 2304
/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
@@ -104,6 +104,9 @@
#define IEEE80211_SCTL_FRAG 0x000F
#define IEEE80211_SCTL_SEQ 0xFFF0
+/* QOS control */
+#define IEEE80211_QCTL_TID 0x000F
+
/* debug macros */
#ifdef CONFIG_IEEE80211_DEBUG
@@ -1075,6 +1078,7 @@ struct ieee80211_device {
int (*handle_management) (struct net_device * dev,
struct ieee80211_network * network, u16 type);
+ int (*is_qos_active) (struct net_device *dev, struct sk_buff *skb);
/* Typical STA methods */
int (*handle_auth) (struct net_device * dev,
@@ -1243,7 +1247,8 @@ extern int ieee80211_set_encryption(struct ieee80211_device *ieee);
extern int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev);
extern void ieee80211_txb_free(struct ieee80211_txb *);
extern int ieee80211_tx_frame(struct ieee80211_device *ieee,
- struct ieee80211_hdr *frame, int len);
+ struct ieee80211_hdr *frame, int hdr_len,
+ int total_len, int encrypt_mpdu);
/* ieee80211_rx.c */
extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
diff --git a/include/net/ieee80211softmac.h b/include/net/ieee80211softmac.h
index 052ed596a4e4..7a483ab4022f 100644
--- a/include/net/ieee80211softmac.h
+++ b/include/net/ieee80211softmac.h
@@ -86,6 +86,9 @@ struct ieee80211softmac_assoc_info {
/* BSSID we're trying to associate to */
char bssid[ETH_ALEN];
+
+ /* Rates supported by the network */
+ struct ieee80211softmac_ratesinfo supported_rates;
/* some flags.
* static_essid is valid if the essid is constant,
@@ -132,23 +135,26 @@ enum {
struct ieee80211softmac_txrates {
/* The Bit-Rate to be used for multicast frames. */
u8 mcast_rate;
- /* The Bit-Rate to be used for multicast fallback
- * (If the device supports fallback and hardware-retry)
- */
- u8 mcast_fallback;
+
+ /* The Bit-Rate to be used for multicast management frames. */
+ u8 mgt_mcast_rate;
+
/* The Bit-Rate to be used for any other (normal) data packet. */
u8 default_rate;
/* The Bit-Rate to be used for default fallback
* (If the device supports fallback and hardware-retry)
*/
u8 default_fallback;
+
+ /* This is the rate that the user asked for */
+ u8 user_rate;
};
/* Bits for txrates_change callback. */
#define IEEE80211SOFTMAC_TXRATECHG_DEFAULT (1 << 0) /* default_rate */
#define IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK (1 << 1) /* default_fallback */
#define IEEE80211SOFTMAC_TXRATECHG_MCAST (1 << 2) /* mcast_rate */
-#define IEEE80211SOFTMAC_TXRATECHG_MCAST_FBACK (1 << 3) /* mcast_fallback */
+#define IEEE80211SOFTMAC_TXRATECHG_MGT_MCAST (1 << 3) /* mgt_mcast_rate */
struct ieee80211softmac_device {
/* 802.11 structure for data stuff */
@@ -250,6 +256,28 @@ extern void ieee80211softmac_fragment_lost(struct net_device *dev,
* Note that the rates need to be sorted. */
extern void ieee80211softmac_set_rates(struct net_device *dev, u8 count, u8 *rates);
+/* Helper function which advises you the rate at which a frame should be
+ * transmitted at. */
+static inline u8 ieee80211softmac_suggest_txrate(struct ieee80211softmac_device *mac,
+ int is_multicast,
+ int is_mgt)
+{
+ struct ieee80211softmac_txrates *txrates = &mac->txrates;
+
+ if (!mac->associated)
+ return txrates->mgt_mcast_rate;
+
+ /* We are associated, sending unicast frame */
+ if (!is_multicast)
+ return txrates->default_rate;
+
+ /* We are associated, sending multicast frame */
+ if (is_mgt)
+ return txrates->mgt_mcast_rate;
+ else
+ return txrates->mcast_rate;
+}
+
/* Start the SoftMAC. Call this after you initialized the device
* and it is ready to run.
*/
@@ -282,7 +310,7 @@ extern void ieee80211softmac_stop(struct net_device *dev);
* - context set to the context data you want passed
* The return value is 0, or an error.
*/
-typedef void (*notify_function_ptr)(struct net_device *dev, void *context);
+typedef void (*notify_function_ptr)(struct net_device *dev, int event_type, void *context);
#define ieee80211softmac_notify(dev, event, fun, context) ieee80211softmac_notify_gfp(dev, event, fun, context, GFP_KERNEL);
#define ieee80211softmac_notify_atomic(dev, event, fun, context) ieee80211softmac_notify_gfp(dev, event, fun, context, GFP_ATOMIC);
diff --git a/include/net/ieee80211softmac_wx.h b/include/net/ieee80211softmac_wx.h
index 3e0be453ecea..4ee3ad57283f 100644
--- a/include/net/ieee80211softmac_wx.h
+++ b/include/net/ieee80211softmac_wx.h
@@ -91,4 +91,9 @@ ieee80211softmac_wx_get_genie(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra);
+extern int
+ieee80211softmac_wx_set_mlme(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra);
#endif /* _IEEE80211SOFTMAC_WX */
diff --git a/kernel/exit.c b/kernel/exit.c
index e95b93282210..e06d0c10a24e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -881,14 +881,6 @@ fastcall NORET_TYPE void do_exit(long code)
tsk->flags |= PF_EXITING;
- /*
- * Make sure we don't try to process any timer firings
- * while we are already exiting.
- */
- tsk->it_virt_expires = cputime_zero;
- tsk->it_prof_expires = cputime_zero;
- tsk->it_sched_expires = 0;
-
if (unlikely(in_atomic()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
current->comm, current->pid,
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index b7f0388bd71c..01fa2ae98a85 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -456,6 +456,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
return ret;
}
+EXPORT_SYMBOL_GPL(hrtimer_start);
/**
* hrtimer_try_to_cancel - try to deactivate a timer
@@ -484,6 +485,7 @@ int hrtimer_try_to_cancel(struct hrtimer *timer)
return ret;
}
+EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
/**
* hrtimer_cancel - cancel a timer and wait for the handler to finish.
@@ -504,6 +506,7 @@ int hrtimer_cancel(struct hrtimer *timer)
cpu_relax();
}
}
+EXPORT_SYMBOL_GPL(hrtimer_cancel);
/**
* hrtimer_get_remaining - get remaining time for the timer
@@ -522,6 +525,7 @@ ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
return rem;
}
+EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
#ifdef CONFIG_NO_IDLE_HZ
/**
@@ -580,6 +584,7 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
timer->base = &bases[clock_id];
timer->node.rb_parent = HRTIMER_INACTIVE;
}
+EXPORT_SYMBOL_GPL(hrtimer_init);
/**
* hrtimer_get_res - get the timer resolution for a clock
@@ -599,6 +604,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
return 0;
}
+EXPORT_SYMBOL_GPL(hrtimer_get_res);
/*
* Expire the per base hrtimer-queue:
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 520f6c59948d..d38d9ec3276c 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -555,9 +555,6 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
struct cpu_timer_list *next;
unsigned long i;
- if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING))
- return;
-
head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
p->cpu_timers : p->signal->cpu_timers);
head += CPUCLOCK_WHICH(timer->it_clock);
@@ -1173,6 +1170,9 @@ static void check_process_timers(struct task_struct *tsk,
}
t = tsk;
do {
+ if (unlikely(t->flags & PF_EXITING))
+ continue;
+
ticks = cputime_add(cputime_add(t->utime, t->stime),
prof_left);
if (!cputime_eq(prof_expires, cputime_zero) &&
@@ -1193,11 +1193,7 @@ static void check_process_timers(struct task_struct *tsk,
t->it_sched_expires > sched)) {
t->it_sched_expires = sched;
}
-
- do {
- t = next_thread(t);
- } while (unlikely(t->flags & PF_EXITING));
- } while (t != tsk);
+ } while ((t = next_thread(t)) != tsk);
}
}
@@ -1289,30 +1285,30 @@ void run_posix_cpu_timers(struct task_struct *tsk)
#undef UNEXPIRED
- BUG_ON(tsk->exit_state);
-
/*
* Double-check with locks held.
*/
read_lock(&tasklist_lock);
- spin_lock(&tsk->sighand->siglock);
+ if (likely(tsk->signal != NULL)) {
+ spin_lock(&tsk->sighand->siglock);
- /*
- * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
- * all the timers that are firing, and put them on the firing list.
- */
- check_thread_timers(tsk, &firing);
- check_process_timers(tsk, &firing);
+ /*
+ * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
+ * all the timers that are firing, and put them on the firing list.
+ */
+ check_thread_timers(tsk, &firing);
+ check_process_timers(tsk, &firing);
- /*
- * We must release these locks before taking any timer's lock.
- * There is a potential race with timer deletion here, as the
- * siglock now protects our private firing list. We have set
- * the firing flag in each timer, so that a deletion attempt
- * that gets the timer lock before we do will give it up and
- * spin until we've taken care of that timer below.
- */
- spin_unlock(&tsk->sighand->siglock);
+ /*
+ * We must release these locks before taking any timer's lock.
+ * There is a potential race with timer deletion here, as the
+ * siglock now protects our private firing list. We have set
+ * the firing flag in each timer, so that a deletion attempt
+ * that gets the timer lock before we do will give it up and
+ * spin until we've taken care of that timer below.
+ */
+ spin_unlock(&tsk->sighand->siglock);
+ }
read_unlock(&tasklist_lock);
/*
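The rewritten thread walk above relies on a small C detail: because the iterator is advanced inside the while() condition, a `continue` for an exiting thread still moves on to the next thread rather than spinning on the same one. A minimal stand-alone sketch of that pattern, with a toy circular list standing in for the kernel's thread list:

#include <stdio.h>

struct thr {
	int id;
	int exiting;
	struct thr *next;
};

int main(void)
{
	struct thr c = { 3, 0, NULL }, b = { 2, 1, &c }, a = { 1, 0, &b };
	struct thr *tsk = &a, *t = tsk;

	c.next = &a;			/* close the circle */

	do {
		if (t->exiting)
			continue;	/* still advances via the while() below */
		printf("charging timers to thread %d\n", t->id);
	} while ((t = t->next) != tsk);

	return 0;
}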
diff --git a/kernel/power/main.c b/kernel/power/main.c
index a6d9ef46009e..0a907f0dc56b 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -86,6 +86,7 @@ static int suspend_prepare(suspend_state_t state)
goto Thaw;
}
+ suspend_console();
if ((error = device_suspend(PMSG_SUSPEND))) {
printk(KERN_ERR "Some devices failed to suspend\n");
goto Finish;
@@ -133,6 +134,7 @@ int suspend_enter(suspend_state_t state)
static void suspend_finish(suspend_state_t state)
{
device_resume();
+ resume_console();
thaw_processes();
enable_nonboot_cpus();
if (pm_ops && pm_ops->finish)
diff --git a/kernel/printk.c b/kernel/printk.c
index c056f3324432..19a955619294 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -67,6 +67,7 @@ EXPORT_SYMBOL(oops_in_progress);
* driver system.
*/
static DECLARE_MUTEX(console_sem);
+static DECLARE_MUTEX(secondary_console_sem);
struct console *console_drivers;
/*
* This is used for debugging the mess that is the VT code by
@@ -76,7 +77,7 @@ struct console *console_drivers;
* path in the console code where we end up in places I want
locked without the console semaphore held
*/
-static int console_locked;
+static int console_locked, console_suspended;
/*
* logbuf_lock protects log_buf, log_start, log_end, con_start and logged_chars
@@ -698,6 +699,23 @@ int __init add_preferred_console(char *name, int idx, char *options)
}
/**
+ * suspend_console - suspend the console subsystem
+ *
+ * This disables printk() while we go into suspend states
+ */
+void suspend_console(void)
+{
+ acquire_console_sem();
+ console_suspended = 1;
+}
+
+void resume_console(void)
+{
+ console_suspended = 0;
+ release_console_sem();
+}
+
+/**
* acquire_console_sem - lock the console system for exclusive use.
*
* Acquires a semaphore which guarantees that the caller has
@@ -708,6 +726,10 @@ int __init add_preferred_console(char *name, int idx, char *options)
void acquire_console_sem(void)
{
BUG_ON(in_interrupt());
+ if (console_suspended) {
+ down(&secondary_console_sem);
+ return;
+ }
down(&console_sem);
console_locked = 1;
console_may_schedule = 1;
@@ -750,6 +772,10 @@ void release_console_sem(void)
unsigned long _con_start, _log_end;
unsigned long wake_klogd = 0;
+ if (console_suspended) {
+ up(&secondary_console_sem);
+ return;
+ }
for ( ; ; ) {
spin_lock_irqsave(&logbuf_lock, flags);
wake_klogd |= log_start - log_end;
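The suspend/resume pair above parks the console semaphore for the whole suspend window and quietly diverts any acquire/release pair issued in between to the secondary semaphore. A single-threaded sketch of just that pairing, with the two semaphores modelled as plain counters (illustrative only, not real locking):

#include <assert.h>
#include <stdio.h>

static int primary, secondary, suspended;

static void acquire(void) { if (suspended) secondary++; else primary++; }
static void release(void) { if (suspended) secondary--; else primary--; }
static void suspend(void) { acquire(); suspended = 1; }
static void resume(void)  { suspended = 0; release(); }

int main(void)
{
	suspend();		/* primary held for the whole suspend window */
	acquire();		/* e.g. a printk path during suspend */
	release();
	resume();

	assert(primary == 0 && secondary == 0);
	printf("both locks balanced\n");
	return 0;
}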
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1ae2b2cc3a54..70df5c0d957e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -91,8 +91,8 @@ static void grow_zone_span(struct zone *zone,
if (start_pfn < zone->zone_start_pfn)
zone->zone_start_pfn = start_pfn;
- if (end_pfn > old_zone_end_pfn)
- zone->spanned_pages = end_pfn - zone->zone_start_pfn;
+ zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
+ zone->zone_start_pfn;
zone_span_writeunlock(zone);
}
@@ -106,8 +106,8 @@ static void grow_pgdat_span(struct pglist_data *pgdat,
if (start_pfn < pgdat->node_start_pfn)
pgdat->node_start_pfn = start_pfn;
- if (end_pfn > old_pgdat_end_pfn)
- pgdat->node_spanned_pages = end_pfn - pgdat->node_start_pfn;
+ pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
+ pgdat->node_start_pfn;
}
int online_pages(unsigned long pfn, unsigned long nr_pages)
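The max() form above always recomputes the span from whichever end pfn is larger, so it also stays correct when only the start pfn moves down. A small worked example with made-up pfn values:

#include <stdio.h>

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned long zone_start = 0x1000, spanned = 0x1000;	/* old zone */
	unsigned long old_end = zone_start + spanned;
	unsigned long new_start = 0x0800, new_end = 0x1800;	/* hot-added range */

	if (new_start < zone_start)
		zone_start = new_start;
	spanned = max_ul(old_end, new_end) - zone_start;

	printf("start=%#lx spanned=%#lx pages\n", zone_start, spanned);
	return 0;
}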
diff --git a/mm/shmem.c b/mm/shmem.c
index 4c5e68e4e9ae..1e43c8a865ba 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1780,6 +1780,7 @@ static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
if (!simple_empty(dentry))
return -ENOTEMPTY;
+ dentry->d_inode->i_nlink--;
dir->i_nlink--;
return shmem_unlink(dir, dentry);
}
@@ -2102,6 +2103,7 @@ static int shmem_fill_super(struct super_block *sb,
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = TMPFS_MAGIC;
sb->s_op = &shmem_ops;
+ sb->s_time_gran = 1;
inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
if (!inode)
diff --git a/mm/slab.c b/mm/slab.c
index d31a06bfbea5..f1b644eb39d8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -207,11 +207,6 @@ typedef unsigned int kmem_bufctl_t;
#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
-/* Max number of objs-per-slab for caches which use off-slab slabs.
- * Needed to avoid a possible looping condition in cache_grow().
- */
-static unsigned long offslab_limit;
-
/*
* struct slab
*
@@ -1356,12 +1351,6 @@ void __init kmem_cache_init(void)
NULL, NULL);
}
- /* Inc off-slab bufctl limit until the ceiling is hit. */
- if (!(OFF_SLAB(sizes->cs_cachep))) {
- offslab_limit = sizes->cs_size - sizeof(struct slab);
- offslab_limit /= sizeof(kmem_bufctl_t);
- }
-
sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
@@ -1780,6 +1769,7 @@ static void set_up_list3s(struct kmem_cache *cachep, int index)
static size_t calculate_slab_order(struct kmem_cache *cachep,
size_t size, size_t align, unsigned long flags)
{
+ unsigned long offslab_limit;
size_t left_over = 0;
int gfporder;
@@ -1791,9 +1781,18 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
if (!num)
continue;
- /* More than offslab_limit objects will cause problems */
- if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
- break;
+ if (flags & CFLGS_OFF_SLAB) {
+ /*
+ * Max number of objs-per-slab for caches which
+ * use off-slab slabs. Needed to avoid a possible
+ * looping condition in cache_grow().
+ */
+ offslab_limit = size - sizeof(struct slab);
+ offslab_limit /= sizeof(kmem_bufctl_t);
+
+ if (num > offslab_limit)
+ break;
+ }
/* Found something acceptable - save it away */
cachep->num = num;
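With the global offslab_limit gone, the ceiling above is derived per call from the object size being considered. A sketch of that arithmetic with stand-in sizes (the real struct slab and kmem_bufctl_t sizes depend on the kernel configuration):

#include <stdio.h>

typedef unsigned int bufctl_sketch_t;			/* stands in for kmem_bufctl_t */
struct slab_hdr_sketch { void *p[4]; unsigned long n[3]; };

int main(void)
{
	unsigned long size = 4096;			/* hypothetical cache size */
	unsigned long limit;

	limit = (size - sizeof(struct slab_hdr_sketch)) / sizeof(bufctl_sketch_t);
	printf("at most %lu objects per off-slab slab of %lu bytes\n",
	       limit, size);
	return 0;
}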
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4649a63a8cb6..440a733fe2e9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1061,7 +1061,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
loop_again:
total_scanned = 0;
nr_reclaimed = 0;
- sc.may_writepage = !laptop_mode,
+ sc.may_writepage = !laptop_mode;
sc.nr_mapped = read_page_state(nr_mapped);
inc_page_state(pageoutrun);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ad1c7af65ec8..f5d47bf4f967 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -300,25 +300,20 @@ int br_add_bridge(const char *name)
rtnl_lock();
if (strchr(dev->name, '%')) {
ret = dev_alloc_name(dev, dev->name);
- if (ret < 0)
- goto err1;
+ if (ret < 0) {
+ free_netdev(dev);
+ goto out;
+ }
}
ret = register_netdevice(dev);
if (ret)
- goto err2;
+ goto out;
ret = br_sysfs_addbr(dev);
if (ret)
- goto err3;
- rtnl_unlock();
- return 0;
-
- err3:
- unregister_netdev(dev);
- err2:
- free_netdev(dev);
- err1:
+ unregister_netdevice(dev);
+ out:
rtnl_unlock();
return ret;
}
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index b5981e5f6b00..8c211c58893b 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -452,6 +452,7 @@ found:
(unsigned long long)
avr->dccpavr_ack_ackno);
dccp_ackvec_throw_record(av, avr);
+ break;
}
/*
* If it wasn't received, continue scanning... we might
diff --git a/net/ethernet/Makefile b/net/ethernet/Makefile
index 69b74a9a0fc3..7cef1d8ace27 100644
--- a/net/ethernet/Makefile
+++ b/net/ethernet/Makefile
@@ -3,6 +3,5 @@
#
obj-y += eth.o
-obj-$(CONFIG_SYSCTL) += sysctl_net_ether.o
obj-$(subst m,y,$(CONFIG_IPX)) += pe2.o
obj-$(subst m,y,$(CONFIG_ATALK)) += pe2.o
diff --git a/net/ethernet/sysctl_net_ether.c b/net/ethernet/sysctl_net_ether.c
deleted file mode 100644
index 66b39fc342d2..000000000000
--- a/net/ethernet/sysctl_net_ether.c
+++ /dev/null
@@ -1,14 +0,0 @@
-/* -*- linux-c -*-
- * sysctl_net_ether.c: sysctl interface to net Ethernet subsystem.
- *
- * Begun April 1, 1996, Mike Shaver.
- * Added /proc/sys/net/ether directory entry (empty =) ). [MS]
- */
-
-#include <linux/mm.h>
-#include <linux/sysctl.h>
-#include <linux/if_ether.h>
-
-ctl_table ether_table[] = {
- {0}
-};
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c
index 93def94c1b32..3fa5df2e1f0b 100644
--- a/net/ieee80211/ieee80211_crypt_tkip.c
+++ b/net/ieee80211/ieee80211_crypt_tkip.c
@@ -501,8 +501,11 @@ static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr,
static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr)
{
struct ieee80211_hdr_4addr *hdr11;
+ u16 stype;
hdr11 = (struct ieee80211_hdr_4addr *)skb->data;
+ stype = WLAN_FC_GET_STYPE(le16_to_cpu(hdr11->frame_ctl));
+
switch (le16_to_cpu(hdr11->frame_ctl) &
(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
case IEEE80211_FCTL_TODS:
@@ -523,7 +526,13 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr)
break;
}
- hdr[12] = 0; /* priority */
+ if (stype & IEEE80211_STYPE_QOS_DATA) {
+ const struct ieee80211_hdr_3addrqos *qoshdr =
+ (struct ieee80211_hdr_3addrqos *)skb->data;
+ hdr[12] = le16_to_cpu(qoshdr->qos_ctl) & IEEE80211_QCTL_TID;
+ } else
+ hdr[12] = 0; /* priority */
+
hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
}
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index 604b7b0097bc..2bf567fd5a17 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -369,7 +369,6 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
/* Put this code here so that we avoid duplicating it in all
* Rx paths. - Jean II */
-#ifdef CONFIG_WIRELESS_EXT
#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
/* If spy monitoring on */
if (ieee->spy_data.spy_number > 0) {
@@ -398,7 +397,6 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
wireless_spy_update(ieee->dev, hdr->addr2, &wstats);
}
#endif /* IW_WIRELESS_SPY */
-#endif /* CONFIG_WIRELESS_EXT */
#ifdef NOT_YET
hostap_update_rx_stats(local->ap, hdr, rx_stats);
@@ -1692,8 +1690,8 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
WLAN_FC_GET_STYPE(le16_to_cpu
(header->frame_ctl)));
- IEEE80211_WARNING("%s: IEEE80211_REASSOC_REQ received\n",
- ieee->dev->name);
+ IEEE80211_DEBUG_MGMT("%s: IEEE80211_REASSOC_REQ received\n",
+ ieee->dev->name);
if (ieee->handle_reassoc_request != NULL)
ieee->handle_reassoc_request(ieee->dev,
(struct ieee80211_reassoc_request *)
@@ -1705,8 +1703,8 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
WLAN_FC_GET_STYPE(le16_to_cpu
(header->frame_ctl)));
- IEEE80211_WARNING("%s: IEEE80211_ASSOC_REQ received\n",
- ieee->dev->name);
+ IEEE80211_DEBUG_MGMT("%s: IEEE80211_ASSOC_REQ received\n",
+ ieee->dev->name);
if (ieee->handle_assoc_request != NULL)
ieee->handle_assoc_request(ieee->dev);
break;
@@ -1722,10 +1720,10 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
IEEE80211_DEBUG_MGMT("received UNKNOWN (%d)\n",
WLAN_FC_GET_STYPE(le16_to_cpu
(header->frame_ctl)));
- IEEE80211_WARNING("%s: Unknown management packet: %d\n",
- ieee->dev->name,
- WLAN_FC_GET_STYPE(le16_to_cpu
- (header->frame_ctl)));
+ IEEE80211_DEBUG_MGMT("%s: Unknown management packet: %d\n",
+ ieee->dev->name,
+ WLAN_FC_GET_STYPE(le16_to_cpu
+ (header->frame_ctl)));
break;
}
}
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index 8b4332f53394..6a5de1b84459 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -220,13 +220,43 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
return txb;
}
+static int ieee80211_classify(struct sk_buff *skb)
+{
+ struct ethhdr *eth;
+ struct iphdr *ip;
+
+ eth = (struct ethhdr *)skb->data;
+ if (eth->h_proto != __constant_htons(ETH_P_IP))
+ return 0;
+
+ ip = skb->nh.iph;
+ switch (ip->tos & 0xfc) {
+ case 0x20:
+ return 2;
+ case 0x40:
+ return 1;
+ case 0x60:
+ return 3;
+ case 0x80:
+ return 4;
+ case 0xa0:
+ return 5;
+ case 0xc0:
+ return 6;
+ case 0xe0:
+ return 7;
+ default:
+ return 0;
+ }
+}
+
/* Incoming skb is converted to a txb which consists of
* a block of 802.11 fragment packets (stored as skbs) */
int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ieee80211_device *ieee = netdev_priv(dev);
struct ieee80211_txb *txb = NULL;
- struct ieee80211_hdr_3addr *frag_hdr;
+ struct ieee80211_hdr_3addrqos *frag_hdr;
int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
rts_required;
unsigned long flags;
@@ -234,9 +264,10 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
int ether_type, encrypt, host_encrypt, host_encrypt_msdu, host_build_iv;
int bytes, fc, hdr_len;
struct sk_buff *skb_frag;
- struct ieee80211_hdr_3addr header = { /* Ensure zero initialized */
+ struct ieee80211_hdr_3addrqos header = {/* Ensure zero initialized */
.duration_id = 0,
- .seq_ctl = 0
+ .seq_ctl = 0,
+ .qos_ctl = 0
};
u8 dest[ETH_ALEN], src[ETH_ALEN];
struct ieee80211_crypt_data *crypt;
@@ -282,12 +313,6 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
memcpy(dest, skb->data, ETH_ALEN);
memcpy(src, skb->data + ETH_ALEN, ETH_ALEN);
- /* Advance the SKB to the start of the payload */
- skb_pull(skb, sizeof(struct ethhdr));
-
- /* Determine total amount of storage required for TXB packets */
- bytes = skb->len + SNAP_SIZE + sizeof(u16);
-
if (host_encrypt || host_build_iv)
fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
IEEE80211_FCTL_PROTECTED;
@@ -306,9 +331,23 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
memcpy(header.addr2, src, ETH_ALEN);
memcpy(header.addr3, ieee->bssid, ETH_ALEN);
}
- header.frame_ctl = cpu_to_le16(fc);
hdr_len = IEEE80211_3ADDR_LEN;
+ if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) {
+ fc |= IEEE80211_STYPE_QOS_DATA;
+ hdr_len += 2;
+
+ skb->priority = ieee80211_classify(skb);
+ header.qos_ctl |= skb->priority & IEEE80211_QCTL_TID;
+ }
+ header.frame_ctl = cpu_to_le16(fc);
+
+ /* Advance the SKB to the start of the payload */
+ skb_pull(skb, sizeof(struct ethhdr));
+
+ /* Determine total amount of storage required for TXB packets */
+ bytes = skb->len + SNAP_SIZE + sizeof(u16);
+
/* Encrypt msdu first on the whole data packet. */
if ((host_encrypt || host_encrypt_msdu) &&
crypt && crypt->ops && crypt->ops->encrypt_msdu) {
@@ -402,7 +441,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
if (rts_required) {
skb_frag = txb->fragments[0];
frag_hdr =
- (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len);
+ (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
/*
* Set header frame_ctl to the RTS.
@@ -433,7 +472,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
crypt->ops->extra_mpdu_prefix_len);
frag_hdr =
- (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len);
+ (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
memcpy(frag_hdr, &header, hdr_len);
/* If this is not the last fragment, then add the MOREFRAGS
@@ -516,7 +555,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
/* Incoming 802.11 structure is converted to a TXB
* a block of 802.11 fragment packets (stored as skbs) */
int ieee80211_tx_frame(struct ieee80211_device *ieee,
- struct ieee80211_hdr *frame, int len)
+ struct ieee80211_hdr *frame, int hdr_len, int total_len,
+ int encrypt_mpdu)
{
struct ieee80211_txb *txb = NULL;
unsigned long flags;
@@ -526,6 +566,9 @@ int ieee80211_tx_frame(struct ieee80211_device *ieee,
spin_lock_irqsave(&ieee->lock, flags);
+ if (encrypt_mpdu && !ieee->sec.encrypt)
+ encrypt_mpdu = 0;
+
/* If there is no driver handler to take the TXB, don't bother
* creating it... */
if (!ieee->hard_start_xmit) {
@@ -533,32 +576,41 @@ int ieee80211_tx_frame(struct ieee80211_device *ieee,
goto success;
}
- if (unlikely(len < 24)) {
+ if (unlikely(total_len < 24)) {
printk(KERN_WARNING "%s: skb too small (%d).\n",
- ieee->dev->name, len);
+ ieee->dev->name, total_len);
goto success;
}
+ if (encrypt_mpdu)
+ frame->frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+
/* When we allocate the TXB we allocate enough space for the reserve
* and full fragment bytes (bytes_per_frag doesn't include prefix,
* postfix, header, FCS, etc.) */
- txb = ieee80211_alloc_txb(1, len, ieee->tx_headroom, GFP_ATOMIC);
+ txb = ieee80211_alloc_txb(1, total_len, ieee->tx_headroom, GFP_ATOMIC);
if (unlikely(!txb)) {
printk(KERN_WARNING "%s: Could not allocate TXB\n",
ieee->dev->name);
goto failed;
}
txb->encrypted = 0;
- txb->payload_size = len;
+ txb->payload_size = total_len;
skb_frag = txb->fragments[0];
- memcpy(skb_put(skb_frag, len), frame, len);
+ memcpy(skb_put(skb_frag, total_len), frame, total_len);
if (ieee->config &
(CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
skb_put(skb_frag, 4);
+ /* To avoid overcomplicating things, we do the corner-case frame
+ * encryption in software. The only real situation where encryption is
+ * needed here is during software-based shared key authentication. */
+ if (encrypt_mpdu)
+ ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
+
success:
spin_unlock_irqrestore(&ieee->lock, flags);
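ieee80211_classify() above maps the precedence bits of the IPv4 TOS byte onto 802.11 user priorities, defaulting to best effort. The same mapping as a self-contained program, with a few sample TOS values:

#include <stdio.h>

static int classify_tos(unsigned char tos)
{
	switch (tos & 0xfc) {
	case 0x20: return 2;
	case 0x40: return 1;
	case 0x60: return 3;
	case 0x80: return 4;
	case 0xa0: return 5;
	case 0xc0: return 6;
	case 0xe0: return 7;
	default:   return 0;
	}
}

int main(void)
{
	printf("tos 0x00 -> priority %d\n", classify_tos(0x00));
	printf("tos 0xb8 -> priority %d\n", classify_tos(0xb8)); /* not in the table: best effort */
	printf("tos 0xe0 -> priority %d\n", classify_tos(0xe0));
	return 0;
}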
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index b885fd189403..a78c4f845f66 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -50,7 +50,8 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
char *p;
struct iw_event iwe;
int i, j;
- u8 max_rate, rate;
+ char *current_val; /* For rates */
+ u8 rate;
/* First entry *MUST* be the AP MAC address */
iwe.cmd = SIOCGIWAP;
@@ -107,9 +108,13 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
start = iwe_stream_add_point(start, stop, &iwe, network->ssid);
/* Add basic and extended rates */
- max_rate = 0;
- p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
+ /* Rate : stuffing multiple values in a single event requires a bit
+ * more magic - Jean II */
+ current_val = start + IW_EV_LCP_LEN;
+ iwe.cmd = SIOCGIWRATE;
+ /* Those two flags are ignored... */
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+
for (i = 0, j = 0; i < network->rates_len;) {
if (j < network->rates_ex_len &&
((network->rates_ex[j] & 0x7F) <
@@ -117,28 +122,21 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
rate = network->rates_ex[j++] & 0x7F;
else
rate = network->rates[i++] & 0x7F;
- if (rate > max_rate)
- max_rate = rate;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
- "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
+ /* Bit rate given in 500 kb/s units (+ 0x80) */
+ iwe.u.bitrate.value = ((rate & 0x7f) * 500000);
+ /* Add new value to event */
+ current_val = iwe_stream_add_value(start, current_val, stop, &iwe, IW_EV_PARAM_LEN);
}
for (; j < network->rates_ex_len; j++) {
rate = network->rates_ex[j] & 0x7F;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
- "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
- if (rate > max_rate)
- max_rate = rate;
+ /* Bit rate given in 500 kb/s units (+ 0x80) */
+ iwe.u.bitrate.value = ((rate & 0x7f) * 500000);
+ /* Add new value to event */
+ current_val = iwe_stream_add_value(start, current_val, stop, &iwe, IW_EV_PARAM_LEN);
}
-
- iwe.cmd = SIOCGIWRATE;
- iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
- iwe.u.bitrate.value = max_rate * 500000;
- start = iwe_stream_add_event(start, stop, &iwe, IW_EV_PARAM_LEN);
-
- iwe.cmd = IWEVCUSTOM;
- iwe.u.data.length = p - custom;
- if (iwe.u.data.length)
- start = iwe_stream_add_point(start, stop, &iwe, custom);
+ /* Check if we added any rate */
+ if((current_val - start) > IW_EV_LCP_LEN)
+ start = current_val;
/* Add quality statistics */
iwe.cmd = IWEVQUAL;
@@ -505,7 +503,7 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
len = sec->key_sizes[key];
memcpy(keybuf, sec->keys[key], len);
- erq->length = (len >= 0 ? len : 0);
+ erq->length = len;
erq->flags |= IW_ENCODE_ENABLED;
if (ieee->open_wep)
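Scan results now report every rate as its own SIOCGIWRATE value rather than a single custom string. The conversion is the usual 802.11 encoding: rates in 500 kb/s units with bit 7 marking basic rates, as in this small sketch (sample rate bytes only):

#include <stdio.h>

int main(void)
{
	unsigned char rates[] = { 0x82, 0x84, 0x0b, 0x16 };	/* 1, 2, 5.5, 11 Mb/s */
	int i;

	for (i = 0; i < 4; i++)
		printf("raw 0x%02x -> %d bit/s%s\n", rates[i],
		       (rates[i] & 0x7f) * 500000,
		       (rates[i] & 0x80) ? " (basic)" : "");
	return 0;
}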
diff --git a/net/ieee80211/softmac/Kconfig b/net/ieee80211/softmac/Kconfig
index f2a27cc6ecb1..2811651cb134 100644
--- a/net/ieee80211/softmac/Kconfig
+++ b/net/ieee80211/softmac/Kconfig
@@ -2,6 +2,7 @@ config IEEE80211_SOFTMAC
tristate "Software MAC add-on to the IEEE 802.11 networking stack"
depends on IEEE80211 && EXPERIMENTAL
select WIRELESS_EXT
+ select IEEE80211_CRYPT_WEP
---help---
This option enables the hardware-independent software MAC add-on
for the IEEE 802.11 networking stack.
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index 57ea9f6f465c..5e9a90651d04 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -82,51 +82,52 @@ ieee80211softmac_assoc_timeout(void *d)
ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_TIMEOUT, NULL);
}
-/* Sends out a disassociation request to the desired AP */
-static void
-ieee80211softmac_disassoc(struct ieee80211softmac_device *mac, u16 reason)
+void
+ieee80211softmac_disassoc(struct ieee80211softmac_device *mac)
{
unsigned long flags;
+
+ spin_lock_irqsave(&mac->lock, flags);
+ if (mac->associnfo.associating)
+ cancel_delayed_work(&mac->associnfo.timeout);
+
+ netif_carrier_off(mac->dev);
+
+ mac->associated = 0;
+ mac->associnfo.bssvalid = 0;
+ mac->associnfo.associating = 0;
+ ieee80211softmac_init_txrates(mac);
+ ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_DISASSOCIATED, NULL);
+ spin_unlock_irqrestore(&mac->lock, flags);
+}
+
+/* Sends out a disassociation request to the desired AP */
+void
+ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason)
+{
struct ieee80211softmac_network *found;
if (mac->associnfo.bssvalid && mac->associated) {
found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid);
if (found)
ieee80211softmac_send_mgt_frame(mac, found, IEEE80211_STYPE_DISASSOC, reason);
- } else if (mac->associnfo.associating) {
- cancel_delayed_work(&mac->associnfo.timeout);
}
- /* Change our state */
- spin_lock_irqsave(&mac->lock, flags);
- /* Do NOT clear bssvalid as that will break ieee80211softmac_assoc_work! */
- mac->associated = 0;
- mac->associnfo.associating = 0;
- ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_DISASSOCIATED, NULL);
- spin_unlock_irqrestore(&mac->lock, flags);
+ ieee80211softmac_disassoc(mac);
}
static inline int
we_support_all_basic_rates(struct ieee80211softmac_device *mac, u8 *from, u8 from_len)
{
- int idx, search, found;
- u8 rate, search_rate;
+ int idx;
+ u8 rate;
for (idx = 0; idx < (from_len); idx++) {
rate = (from)[idx];
if (!(rate & IEEE80211_BASIC_RATE_MASK))
continue;
- found = 0;
rate &= ~IEEE80211_BASIC_RATE_MASK;
- for (search = 0; search < mac->ratesinfo.count; search++) {
- search_rate = mac->ratesinfo.rates[search];
- search_rate &= ~IEEE80211_BASIC_RATE_MASK;
- if (rate == search_rate) {
- found = 1;
- break;
- }
- }
- if (!found)
+ if (!ieee80211softmac_ratesinfo_rate_supported(&mac->ratesinfo, rate))
return 0;
}
return 1;
@@ -163,12 +164,28 @@ network_matches_request(struct ieee80211softmac_device *mac, struct ieee80211_ne
}
static void
-ieee80211softmac_assoc_notify(struct net_device *dev, void *context)
+ieee80211softmac_assoc_notify_scan(struct net_device *dev, int event_type, void *context)
{
struct ieee80211softmac_device *mac = ieee80211_priv(dev);
ieee80211softmac_assoc_work((void*)mac);
}
+static void
+ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void *context)
+{
+ struct ieee80211softmac_device *mac = ieee80211_priv(dev);
+
+ switch (event_type) {
+ case IEEE80211SOFTMAC_EVENT_AUTHENTICATED:
+ ieee80211softmac_assoc_work((void*)mac);
+ break;
+ case IEEE80211SOFTMAC_EVENT_AUTH_FAILED:
+ case IEEE80211SOFTMAC_EVENT_AUTH_TIMEOUT:
+ ieee80211softmac_disassoc(mac);
+ break;
+ }
+}
+
/* This function is called to handle userspace requests (asynchronously) */
void
ieee80211softmac_assoc_work(void *d)
@@ -176,14 +193,18 @@ ieee80211softmac_assoc_work(void *d)
struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
struct ieee80211softmac_network *found = NULL;
struct ieee80211_network *net = NULL, *best = NULL;
+ int bssvalid;
unsigned long flags;
-
+
+ /* ieee80211softmac_disassoc might clear this */
+ bssvalid = mac->associnfo.bssvalid;
+
/* meh */
if (mac->associated)
- ieee80211softmac_disassoc(mac, WLAN_REASON_DISASSOC_STA_HAS_LEFT);
+ ieee80211softmac_send_disassoc_req(mac, WLAN_REASON_DISASSOC_STA_HAS_LEFT);
/* try to find the requested network in our list, if we found one already */
- if (mac->associnfo.bssvalid || mac->associnfo.bssfixed)
+ if (bssvalid || mac->associnfo.bssfixed)
found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid);
/* Search the ieee80211 networks for this network if we didn't find it by bssid,
@@ -244,7 +265,7 @@ ieee80211softmac_assoc_work(void *d)
* Maybe we can hope to have more memory after scanning finishes ;)
*/
dprintk(KERN_INFO PFX "Associate: Scanning for networks first.\n");
- ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify, NULL);
+ ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL);
if (ieee80211softmac_start_scan(mac))
dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n");
return;
@@ -279,7 +300,7 @@ ieee80211softmac_assoc_work(void *d)
* otherwise adding the notification would be racy. */
if (!ieee80211softmac_auth_req(mac, found)) {
dprintk(KERN_INFO PFX "cannot associate without being authenticated, requested authentication\n");
- ieee80211softmac_notify_internal(mac, IEEE80211SOFTMAC_EVENT_ANY, found, ieee80211softmac_assoc_notify, NULL, GFP_KERNEL);
+ ieee80211softmac_notify_internal(mac, IEEE80211SOFTMAC_EVENT_ANY, found, ieee80211softmac_assoc_notify_auth, NULL, GFP_KERNEL);
} else {
printkl(KERN_WARNING PFX "Not authenticated, but requesting authentication failed. Giving up to associate\n");
ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_FAILED, found);
@@ -297,6 +318,9 @@ ieee80211softmac_associated(struct ieee80211softmac_device *mac,
struct ieee80211softmac_network *net)
{
mac->associnfo.associating = 0;
+ mac->associnfo.supported_rates = net->supported_rates;
+ ieee80211softmac_recalc_txrates(mac);
+
mac->associated = 1;
if (mac->set_bssid_filter)
mac->set_bssid_filter(mac->dev, net->bssid);
@@ -380,7 +404,6 @@ ieee80211softmac_handle_disassoc(struct net_device * dev,
struct ieee80211_disassoc *disassoc)
{
struct ieee80211softmac_device *mac = ieee80211_priv(dev);
- unsigned long flags;
if (unlikely(!mac->running))
return -ENODEV;
@@ -392,14 +415,11 @@ ieee80211softmac_handle_disassoc(struct net_device * dev,
return 0;
dprintk(KERN_INFO PFX "got disassoc frame\n");
- netif_carrier_off(dev);
- spin_lock_irqsave(&mac->lock, flags);
- mac->associnfo.bssvalid = 0;
- mac->associated = 0;
- ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_DISASSOCIATED, NULL);
+ ieee80211softmac_disassoc(mac);
+
+ /* try to reassociate */
schedule_work(&mac->associnfo.work);
- spin_unlock_irqrestore(&mac->lock, flags);
-
+
return 0;
}
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 06e332624665..90b8484e509b 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -107,6 +107,7 @@ ieee80211softmac_auth_queue(void *data)
printkl(KERN_WARNING PFX "Authentication timed out with "MAC_FMT"\n", MAC_ARG(net->bssid));
/* Remove this item from the queue */
spin_lock_irqsave(&mac->lock, flags);
+ net->authenticating = 0;
ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_AUTH_TIMEOUT, net);
cancel_delayed_work(&auth->work); /* just to make sure... */
list_del(&auth->list);
@@ -212,13 +213,13 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE;
spin_unlock_irqrestore(&mac->lock, flags);
- /* Switch to correct channel for this network */
- mac->set_channel(mac->dev, net->channel);
-
- /* Send our response (How to encrypt?) */
+ /* Send our response */
ieee80211softmac_send_mgt_frame(mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
- break;
+ return 0;
case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
+ kfree(net->challenge);
+ net->challenge = NULL;
+ net->challenge_len = 0;
/* Check the status code of the response */
switch(auth->status) {
case WLAN_STATUS_SUCCESS:
@@ -229,6 +230,7 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
spin_unlock_irqrestore(&mac->lock, flags);
printkl(KERN_NOTICE PFX "Shared Key Authentication completed with "MAC_FMT"\n",
MAC_ARG(net->bssid));
+ ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_AUTHENTICATED, net);
break;
default:
printkl(KERN_NOTICE PFX "Shared Key Authentication with "MAC_FMT" failed, error code: %i\n",
@@ -279,6 +281,9 @@ ieee80211softmac_deauth_from_net(struct ieee80211softmac_device *mac,
struct list_head *list_ptr;
unsigned long flags;
+ /* deauthentication implies disassociation */
+ ieee80211softmac_disassoc(mac);
+
/* Lock and reset status flags */
spin_lock_irqsave(&mac->lock, flags);
net->authenticating = 0;
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c
index 8cc8f3f0f8e7..f34fa2ef666b 100644
--- a/net/ieee80211/softmac/ieee80211softmac_event.c
+++ b/net/ieee80211/softmac/ieee80211softmac_event.c
@@ -38,7 +38,8 @@
* The event context is private and can only be used from
* within this module. Its meaning varies with the event
* type:
- * SCAN_FINISHED: no special meaning
+ * SCAN_FINISHED,
+ * DISASSOCIATED: NULL
* ASSOCIATED,
* ASSOCIATE_FAILED,
* ASSOCIATE_TIMEOUT,
@@ -59,15 +60,15 @@
*/
static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = {
- "scan finished",
- "associated",
+ NULL, /* scan finished */
+ NULL, /* associated */
"associating failed",
"associating timed out",
"authenticated",
"authenticating failed",
"authenticating timed out",
"associating failed because no suitable network was found",
- "disassociated",
+ NULL, /* disassociated */
};
@@ -77,7 +78,7 @@ ieee80211softmac_notify_callback(void *d)
struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d;
kfree(d);
- event.fun(event.mac->dev, event.context);
+ event.fun(event.mac->dev, event.event_type, event.context);
}
int
@@ -136,30 +137,24 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve
int we_event;
char *msg = NULL;
+ memset(&wrqu, '\0', sizeof (union iwreq_data));
+
switch(event) {
case IEEE80211SOFTMAC_EVENT_ASSOCIATED:
network = (struct ieee80211softmac_network *)event_ctx;
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
memcpy(wrqu.ap_addr.sa_data, &network->bssid[0], ETH_ALEN);
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- we_event = SIOCGIWAP;
- break;
+ /* fall through */
case IEEE80211SOFTMAC_EVENT_DISASSOCIATED:
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- memset(&wrqu, '\0', sizeof (union iwreq_data));
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
we_event = SIOCGIWAP;
break;
case IEEE80211SOFTMAC_EVENT_SCAN_FINISHED:
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- memset(&wrqu, '\0', sizeof (union iwreq_data));
we_event = SIOCGIWSCAN;
break;
default:
msg = event_descriptions[event];
+ if (!msg)
+ msg = "SOFTMAC EVENT BUG";
wrqu.data.length = strlen(msg);
we_event = IWEVCUSTOM;
break;
@@ -172,6 +167,9 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve
if ((eventptr->event_type == event || eventptr->event_type == -1)
&& (eventptr->event_context == NULL || eventptr->event_context == event_ctx)) {
list_del(&eventptr->list);
+ /* User may have subscribed to ANY event, so
+ * we tell them which event triggered it. */
+ eventptr->event_type = event;
schedule_work(&eventptr->work);
}
}
diff --git a/net/ieee80211/softmac/ieee80211softmac_io.c b/net/ieee80211/softmac/ieee80211softmac_io.c
index cc6cd56c85b1..09541611e48c 100644
--- a/net/ieee80211/softmac/ieee80211softmac_io.c
+++ b/net/ieee80211/softmac/ieee80211softmac_io.c
@@ -149,6 +149,56 @@ ieee80211softmac_hdr_3addr(struct ieee80211softmac_device *mac,
* shouldn't the sequence number be in ieee80211? */
}
+static u16
+ieee80211softmac_capabilities(struct ieee80211softmac_device *mac,
+ struct ieee80211softmac_network *net)
+{
+ u16 capability = 0;
+
+ /* ESS and IBSS bits are set according to the current mode */
+ switch (mac->ieee->iw_mode) {
+ case IW_MODE_INFRA:
+ capability = cpu_to_le16(WLAN_CAPABILITY_ESS);
+ break;
+ case IW_MODE_ADHOC:
+ capability = cpu_to_le16(WLAN_CAPABILITY_IBSS);
+ break;
+ case IW_MODE_AUTO:
+ capability = net->capabilities &
+ (WLAN_CAPABILITY_ESS|WLAN_CAPABILITY_IBSS);
+ break;
+ default:
+ /* bleh. we don't ever go to these modes */
+ printk(KERN_ERR PFX "invalid iw_mode!\n");
+ break;
+ }
+
+ /* CF Pollable / CF Poll Request */
+ /* Needs to be implemented, for now, the 0's == not supported */
+
+ /* Privacy Bit */
+ capability |= mac->ieee->sec.level ?
+ cpu_to_le16(WLAN_CAPABILITY_PRIVACY) : 0;
+
+ /* Short Preamble */
+ /* Always supported: we probably won't ever be powering devices which
+ * don't support this... */
+ capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
+
+ /* PBCC */
+ /* Not widely used */
+
+ /* Channel Agility */
+ /* Not widely used */
+
+ /* Short Slot */
+ /* Will be implemented later */
+
+ /* DSSS-OFDM */
+ /* Not widely used */
+
+ return capability;
+}
/*****************************************************************************
* Create Management packets
@@ -179,27 +229,6 @@ ieee80211softmac_assoc_req(struct ieee80211_assoc_request **pkt,
return 0;
ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_ASSOC_REQ, net->bssid, net->bssid);
- /* Fill in capability Info */
- switch (mac->ieee->iw_mode) {
- case IW_MODE_INFRA:
- (*pkt)->capability = cpu_to_le16(WLAN_CAPABILITY_ESS);
- break;
- case IW_MODE_ADHOC:
- (*pkt)->capability = cpu_to_le16(WLAN_CAPABILITY_IBSS);
- break;
- case IW_MODE_AUTO:
- (*pkt)->capability = net->capabilities & (WLAN_CAPABILITY_ESS|WLAN_CAPABILITY_IBSS);
- break;
- default:
- /* bleh. we don't ever go to these modes */
- printk(KERN_ERR PFX "invalid iw_mode!\n");
- break;
- }
- /* Need to add this
- (*pkt)->capability |= mac->ieee->short_slot ?
- cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME) : 0;
- */
- (*pkt)->capability |= mac->ieee->sec.level ? cpu_to_le16(WLAN_CAPABILITY_PRIVACY) : 0;
/* Fill in Listen Interval (?) */
(*pkt)->listen_interval = cpu_to_le16(10);
@@ -239,17 +268,9 @@ ieee80211softmac_reassoc_req(struct ieee80211_reassoc_request **pkt,
return 0;
ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_REASSOC_REQ, net->bssid, net->bssid);
- /* Fill in capability Info */
- (*pkt)->capability = mac->ieee->iw_mode == IW_MODE_MASTER ?
- cpu_to_le16(WLAN_CAPABILITY_ESS) :
- cpu_to_le16(WLAN_CAPABILITY_IBSS);
- /*
- (*pkt)->capability |= mac->ieee->short_slot ?
- cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME) : 0;
- */
- (*pkt)->capability |= mac->ieee->sec.level ?
- cpu_to_le16(WLAN_CAPABILITY_PRIVACY) : 0;
-
+ /* Fill in the capabilities */
+ (*pkt)->capability = ieee80211softmac_capabilities(mac, net);
+
/* Fill in Listen Interval (?) */
(*pkt)->listen_interval = cpu_to_le16(10);
/* Fill in the current AP MAC */
@@ -268,26 +289,27 @@ ieee80211softmac_reassoc_req(struct ieee80211_reassoc_request **pkt,
static u32
ieee80211softmac_auth(struct ieee80211_auth **pkt,
struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net,
- u16 transaction, u16 status)
+ u16 transaction, u16 status, int *encrypt_mpdu)
{
u8 *data;
+ int auth_mode = mac->ieee->sec.auth_mode;
+ int is_shared_response = (auth_mode == WLAN_AUTH_SHARED_KEY
+ && transaction == IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE);
+
/* Allocate Packet */
(*pkt) = (struct ieee80211_auth *)ieee80211softmac_alloc_mgt(
2 + /* Auth Algorithm */
2 + /* Auth Transaction Seq */
2 + /* Status Code */
/* Challenge Text IE */
- mac->ieee->open_wep ? 0 :
- 1 + 1 + WLAN_AUTH_CHALLENGE_LEN
- );
+ is_shared_response ? 0 : 1 + 1 + net->challenge_len
+ );
if (unlikely((*pkt) == NULL))
return 0;
ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_AUTH, net->bssid, net->bssid);
/* Algorithm */
- (*pkt)->algorithm = mac->ieee->open_wep ?
- cpu_to_le16(WLAN_AUTH_OPEN) :
- cpu_to_le16(WLAN_AUTH_SHARED_KEY);
+ (*pkt)->algorithm = cpu_to_le16(auth_mode);
/* Transaction */
(*pkt)->transaction = cpu_to_le16(transaction);
/* Status */
@@ -295,18 +317,20 @@ ieee80211softmac_auth(struct ieee80211_auth **pkt,
data = (u8 *)(*pkt)->info_element;
/* Challenge Text */
- if(!mac->ieee->open_wep){
+ if (is_shared_response) {
*data = MFIE_TYPE_CHALLENGE;
data++;
/* Copy the challenge in */
- // *data = challenge length
- // data += sizeof(u16);
- // memcpy(data, challenge, challenge length);
- // data += challenge length;
-
- /* Add the full size to the packet length */
- }
+ *data = net->challenge_len;
+ data++;
+ memcpy(data, net->challenge, net->challenge_len);
+ data += net->challenge_len;
+
+ /* Make sure this frame gets encrypted with the shared key */
+ *encrypt_mpdu = 1;
+ } else
+ *encrypt_mpdu = 0;
/* Return the packet size */
return (data - (u8 *)(*pkt));
@@ -396,6 +420,7 @@ ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac,
{
void *pkt = NULL;
u32 pkt_size = 0;
+ int encrypt_mpdu = 0;
switch(type) {
case IEEE80211_STYPE_ASSOC_REQ:
@@ -405,7 +430,7 @@ ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac,
pkt_size = ieee80211softmac_reassoc_req((struct ieee80211_reassoc_request **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg);
break;
case IEEE80211_STYPE_AUTH:
- pkt_size = ieee80211softmac_auth((struct ieee80211_auth **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg, (u16)(arg & 0xFFFF), (u16) (arg >> 16));
+ pkt_size = ieee80211softmac_auth((struct ieee80211_auth **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg, (u16)(arg & 0xFFFF), (u16) (arg >> 16), &encrypt_mpdu);
break;
case IEEE80211_STYPE_DISASSOC:
case IEEE80211_STYPE_DEAUTH:
@@ -434,52 +459,8 @@ ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac,
* or get rid of it altogether?
* Does this work for you now?
*/
- ieee80211_tx_frame(mac->ieee, (struct ieee80211_hdr *)pkt, pkt_size);
-
- kfree(pkt);
- return 0;
-}
-
-
-/* Create an rts/cts frame */
-static u32
-ieee80211softmac_rts_cts(struct ieee80211_hdr_2addr **pkt,
- struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net,
- u32 type)
-{
- /* Allocate Packet */
- (*pkt) = kmalloc(IEEE80211_2ADDR_LEN, GFP_ATOMIC);
- memset(*pkt, 0, IEEE80211_2ADDR_LEN);
- if((*pkt) == NULL)
- return 0;
- ieee80211softmac_hdr_2addr(mac, (*pkt), type, net->bssid);
- return IEEE80211_2ADDR_LEN;
-}
-
-
-/* Sends a control packet */
-static int
-ieee80211softmac_send_ctl_frame(struct ieee80211softmac_device *mac,
- struct ieee80211softmac_network *net, u32 type, u32 arg)
-{
- void *pkt = NULL;
- u32 pkt_size = 0;
-
- switch(type) {
- case IEEE80211_STYPE_RTS:
- case IEEE80211_STYPE_CTS:
- pkt_size = ieee80211softmac_rts_cts((struct ieee80211_hdr_2addr **)(&pkt), mac, net, type);
- break;
- default:
- printkl(KERN_DEBUG PFX "Unsupported Control Frame type: %i\n", type);
- return -EINVAL;
- }
-
- if(pkt_size == 0)
- return -ENOMEM;
-
- /* Send the packet to the ieee80211 layer for tx */
- ieee80211_tx_frame(mac->ieee, (struct ieee80211_hdr *) pkt, pkt_size);
+ ieee80211_tx_frame(mac->ieee, (struct ieee80211_hdr *)pkt,
+ IEEE80211_3ADDR_LEN, pkt_size, encrypt_mpdu);
kfree(pkt);
return 0;
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index 6252be2c0db9..4b2e57d12418 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -26,6 +26,7 @@
#include "ieee80211softmac_priv.h"
#include <linux/sort.h>
+#include <linux/etherdevice.h>
struct net_device *alloc_ieee80211softmac(int sizeof_priv)
{
@@ -61,14 +62,6 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation;
softmac->stop_scan = ieee80211softmac_stop_scan_implementation;
- //TODO: The mcast rate has to be assigned dynamically somewhere (in scanning, association. Not sure...)
- // It has to be set to the highest rate all stations in the current network can handle.
- softmac->txrates.mcast_rate = IEEE80211_CCK_RATE_1MB;
- softmac->txrates.mcast_fallback = IEEE80211_CCK_RATE_1MB;
- /* This is reassigned in ieee80211softmac_start to sane values. */
- softmac->txrates.default_rate = IEEE80211_CCK_RATE_1MB;
- softmac->txrates.default_fallback = IEEE80211_CCK_RATE_1MB;
-
/* to start with, we can't send anything ... */
netif_carrier_off(dev);
@@ -170,15 +163,82 @@ static void ieee80211softmac_start_check_rates(struct ieee80211softmac_device *m
}
}
-void ieee80211softmac_start(struct net_device *dev)
+int ieee80211softmac_ratesinfo_rate_supported(struct ieee80211softmac_ratesinfo *ri, u8 rate)
+{
+ int search;
+ u8 search_rate;
+
+ for (search = 0; search < ri->count; search++) {
+ search_rate = ri->rates[search];
+ search_rate &= ~IEEE80211_BASIC_RATE_MASK;
+ if (rate == search_rate)
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Finds the highest rate which is:
+ * 1. Present in ri (optionally a basic rate)
+ * 2. Supported by the device
+ * 3. Less than or equal to the user-defined rate
+ */
+static u8 highest_supported_rate(struct ieee80211softmac_device *mac,
+ struct ieee80211softmac_ratesinfo *ri, int basic_only)
+{
+ u8 user_rate = mac->txrates.user_rate;
+ int i;
+
+ if (ri->count == 0) {
+ dprintk(KERN_ERR PFX "empty ratesinfo?\n");
+ return IEEE80211_CCK_RATE_1MB;
+ }
+
+ for (i = ri->count - 1; i >= 0; i--) {
+ u8 rate = ri->rates[i];
+ if (basic_only && !(rate & IEEE80211_BASIC_RATE_MASK))
+ continue;
+ rate &= ~IEEE80211_BASIC_RATE_MASK;
+ if (rate > user_rate)
+ continue;
+ if (ieee80211softmac_ratesinfo_rate_supported(&mac->ratesinfo, rate))
+ return rate;
+ }
+
+ /* If we haven't found a suitable rate by now, just trust the user */
+ return user_rate;
+}
+
+void ieee80211softmac_recalc_txrates(struct ieee80211softmac_device *mac)
+{
+ struct ieee80211softmac_txrates *txrates = &mac->txrates;
+ struct ieee80211softmac_txrates oldrates;
+ u32 change = 0;
+
+ if (mac->txrates_change)
+ oldrates = mac->txrates;
+
+ change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
+ txrates->default_rate = highest_supported_rate(mac, &mac->associnfo.supported_rates, 0);
+
+ change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK;
+ txrates->default_fallback = lower_rate(mac, txrates->default_rate);
+
+ change |= IEEE80211SOFTMAC_TXRATECHG_MCAST;
+ txrates->mcast_rate = highest_supported_rate(mac, &mac->associnfo.supported_rates, 1);
+
+ if (mac->txrates_change)
+ mac->txrates_change(mac->dev, change, &oldrates);
+
+}
+
+void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac)
{
- struct ieee80211softmac_device *mac = ieee80211_priv(dev);
struct ieee80211_device *ieee = mac->ieee;
u32 change = 0;
+ struct ieee80211softmac_txrates *txrates = &mac->txrates;
struct ieee80211softmac_txrates oldrates;
- ieee80211softmac_start_check_rates(mac);
-
/* TODO: We need some kind of state machine to lower the default rates
* if we lose too many packets.
*/
@@ -193,22 +253,37 @@ void ieee80211softmac_start(struct net_device *dev)
more reliable. Note similar logic in
ieee80211softmac_wx_set_rate() */
if (ieee->modulation & IEEE80211_CCK_MODULATION) {
- mac->txrates.default_rate = IEEE80211_CCK_RATE_11MB;
- change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
- mac->txrates.default_fallback = IEEE80211_CCK_RATE_5MB;
- change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK;
+ txrates->user_rate = IEEE80211_CCK_RATE_11MB;
} else if (ieee->modulation & IEEE80211_OFDM_MODULATION) {
- mac->txrates.default_rate = IEEE80211_OFDM_RATE_54MB;
- change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
- mac->txrates.default_fallback = IEEE80211_OFDM_RATE_24MB;
- change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK;
+ txrates->user_rate = IEEE80211_OFDM_RATE_54MB;
} else
assert(0);
+
+ txrates->default_rate = IEEE80211_CCK_RATE_1MB;
+ change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
+
+ txrates->default_fallback = IEEE80211_CCK_RATE_1MB;
+ change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK;
+
+ txrates->mcast_rate = IEEE80211_CCK_RATE_1MB;
+ change |= IEEE80211SOFTMAC_TXRATECHG_MCAST;
+
+ txrates->mgt_mcast_rate = IEEE80211_CCK_RATE_1MB;
+ change |= IEEE80211SOFTMAC_TXRATECHG_MGT_MCAST;
+
if (mac->txrates_change)
- mac->txrates_change(dev, change, &oldrates);
+ mac->txrates_change(mac->dev, change, &oldrates);
mac->running = 1;
}
+
+void ieee80211softmac_start(struct net_device *dev)
+{
+ struct ieee80211softmac_device *mac = ieee80211_priv(dev);
+
+ ieee80211softmac_start_check_rates(mac);
+ ieee80211softmac_init_txrates(mac);
+}
EXPORT_SYMBOL_GPL(ieee80211softmac_start);
void ieee80211softmac_stop(struct net_device *dev)
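highest_supported_rate() above scans the network's sorted rate list from the top and settles on the first rate that is locally supported and not above the user's request; recalc_txrates() then derives the default, fallback and multicast rates from it. A user-space sketch of that selection with illustrative rate arrays:

#include <stdio.h>

#define BASIC 0x80	/* stands in for IEEE80211_BASIC_RATE_MASK */

static int supported(const unsigned char *ours, int n, unsigned char rate)
{
	int i;
	for (i = 0; i < n; i++)
		if ((ours[i] & 0x7f) == rate)
			return 1;
	return 0;
}

static unsigned char pick_rate(const unsigned char *net, int n,
			       const unsigned char *ours, int m,
			       unsigned char user_rate, int basic_only)
{
	int i;
	for (i = n - 1; i >= 0; i--) {
		unsigned char rate = net[i];
		if (basic_only && !(rate & BASIC))
			continue;
		rate &= 0x7f;
		if (rate > user_rate)
			continue;
		if (supported(ours, m, rate))
			return rate;
	}
	return user_rate;	/* nothing matched: trust the user */
}

int main(void)
{
	unsigned char net[]  = { 0x82, 0x84, 0x0b, 0x16, 0x24, 0x6c }; /* 1b 2b 5.5 11 18 54 */
	unsigned char ours[] = { 0x02, 0x04, 0x0b, 0x16 };		/* CCK-only device */

	printf("default rate: %d (x500kb/s)\n", pick_rate(net, 6, ours, 4, 0x6c, 0));
	printf("mcast rate:   %d (x500kb/s)\n", pick_rate(net, 6, ours, 4, 0x6c, 1));
	return 0;
}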
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h
index 65d9816c8ecc..fa1f8e3acfc0 100644
--- a/net/ieee80211/softmac/ieee80211softmac_priv.h
+++ b/net/ieee80211/softmac/ieee80211softmac_priv.h
@@ -116,7 +116,10 @@ ieee80211softmac_get_network_by_essid(struct ieee80211softmac_device *mac,
struct ieee80211softmac_essid *essid);
/* Rates related */
+int ieee80211softmac_ratesinfo_rate_supported(struct ieee80211softmac_ratesinfo *ri, u8 rate);
u8 ieee80211softmac_lower_rate_delta(struct ieee80211softmac_device *mac, u8 rate, int delta);
+void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac);
+void ieee80211softmac_recalc_txrates(struct ieee80211softmac_device *mac);
static inline u8 lower_rate(struct ieee80211softmac_device *mac, u8 rate) {
return ieee80211softmac_lower_rate_delta(mac, rate, 1);
}
@@ -150,6 +153,8 @@ int ieee80211softmac_handle_disassoc(struct net_device * dev,
int ieee80211softmac_handle_reassoc_req(struct net_device * dev,
struct ieee80211_reassoc_request * reassoc);
void ieee80211softmac_assoc_timeout(void *d);
+void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason);
+void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac);
/* some helper functions */
static inline int ieee80211softmac_scan_handlers_check_self(struct ieee80211softmac_device *sm)
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index 27edb2b5581a..22aa6199185b 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -211,8 +211,8 @@ ieee80211softmac_wx_set_rate(struct net_device *net_dev,
if (is_ofdm && !(ieee->modulation & IEEE80211_OFDM_MODULATION))
goto out_unlock;
- mac->txrates.default_rate = rate;
- mac->txrates.default_fallback = lower_rate(mac, rate);
+ mac->txrates.user_rate = rate;
+ ieee80211softmac_recalc_txrates(mac);
err = 0;
out_unlock:
@@ -431,3 +431,35 @@ ieee80211softmac_wx_get_genie(struct net_device *dev,
}
EXPORT_SYMBOL_GPL(ieee80211softmac_wx_get_genie);
+int
+ieee80211softmac_wx_set_mlme(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ struct ieee80211softmac_device *mac = ieee80211_priv(dev);
+ struct iw_mlme *mlme = (struct iw_mlme *)extra;
+ u16 reason = cpu_to_le16(mlme->reason_code);
+ struct ieee80211softmac_network *net;
+
+ if (memcmp(mac->associnfo.bssid, mlme->addr.sa_data, ETH_ALEN)) {
+ printk(KERN_DEBUG PFX "wx_set_mlme: requested operation on net we don't use\n");
+ return -EINVAL;
+ }
+
+ switch (mlme->cmd) {
+ case IW_MLME_DEAUTH:
+ net = ieee80211softmac_get_network_by_bssid_locked(mac, mlme->addr.sa_data);
+ if (!net) {
+ printk(KERN_DEBUG PFX "wx_set_mlme: we should know the net here...\n");
+ return -EINVAL;
+ }
+ return ieee80211softmac_deauth_req(mac, net, reason);
+ case IW_MLME_DISASSOC:
+ ieee80211softmac_send_disassoc_req(mac, reason);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL_GPL(ieee80211softmac_wx_set_mlme);
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 0923add122b4..9f0bb529ab70 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -116,6 +116,7 @@ sr_failed:
too_many_hops:
/* Tell the sender its packet died... */
+ IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
drop:
kfree_skb(skb);
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index b72fa55dfb84..ba7c63ca5bb1 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -135,7 +135,8 @@ static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
/* Do additive increase */
if (tp->snd_cwnd < tp->snd_cwnd_clamp) {
- tp->snd_cwnd_cnt += ca->ai;
+ /* cwnd = cwnd + a(w) / cwnd */
+ tp->snd_cwnd_cnt += ca->ai + 1;
if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
tp->snd_cwnd_cnt -= tp->snd_cwnd;
tp->snd_cwnd++;
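The accounting above spreads the additive increase across ACKs: each call adds a(w) to a per-connection counter, and once the counter reaches cwnd the window grows by one segment, i.e. roughly cwnd += a(w)/cwnd per ACK. A toy simulation of that accumulator (all values hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int cwnd = 38, cwnd_cnt = 0, clamp = 1000;
	unsigned int aw = 2;	/* a(w): additive increase per ACK, illustrative */
	int ack;

	for (ack = 0; ack < 100; ack++) {
		if (cwnd < clamp) {
			cwnd_cnt += aw;
			if (cwnd_cnt >= cwnd) {
				cwnd_cnt -= cwnd;
				cwnd++;
			}
		}
	}
	printf("cwnd after 100 acks: %u\n", cwnd);
	return 0;
}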
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4a538bc1683d..b5521a9d3dc1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1649,7 +1649,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
* Hence, we can detect timed out packets during fast
* retransmit without falling to slow start.
*/
- if (tcp_head_timedout(sk, tp)) {
+ if (!IsReno(tp) && tcp_head_timedout(sk, tp)) {
struct sk_buff *skb;
skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
@@ -1662,8 +1662,6 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
- if (IsReno(tp))
- tcp_remove_reno_sacks(sk, tp, tcp_skb_pcount(skb) + 1);
/* clear xmit_retrans hint */
if (tp->retransmit_skb_hint &&
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 743016baa048..f33c9dddaa12 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -642,7 +642,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
* eventually). The difference is that pulled data not copied, but
* immediately discarded.
*/
-static unsigned char *__pskb_trim_head(struct sk_buff *skb, int len)
+static void __pskb_trim_head(struct sk_buff *skb, int len)
{
int i, k, eat;
@@ -667,7 +667,6 @@ static unsigned char *__pskb_trim_head(struct sk_buff *skb, int len)
skb->tail = skb->data;
skb->data_len -= len;
skb->len = skb->data_len;
- return skb->tail;
}
int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
@@ -676,12 +675,11 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
return -ENOMEM;
- if (len <= skb_headlen(skb)) {
+ /* If len == headlen, we avoid __skb_pull to preserve alignment. */
+ if (unlikely(len < skb_headlen(skb)))
__skb_pull(skb, len);
- } else {
- if (__pskb_trim_head(skb, len-skb_headlen(skb)) == NULL)
- return -ENOMEM;
- }
+ else
+ __pskb_trim_head(skb, len - skb_headlen(skb));
TCP_SKB_CB(skb)->seq += len;
skb->ip_summed = CHECKSUM_HW;
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 7029618f5719..a16528657b4c 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -884,7 +884,8 @@ static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
if (now) {
/* Send down empty frame to trigger speed change */
skb = dev_alloc_skb(0);
- irlap_queue_xmit(self, skb);
+ if (skb)
+ irlap_queue_xmit(self, skb);
}
}
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 55538f6b60ff..58a1b6b42ddd 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -37,14 +37,6 @@ struct ctl_table net_table[] = {
.mode = 0555,
.child = core_table,
},
-#ifdef CONFIG_NET
- {
- .ctl_name = NET_ETHER,
- .procname = "ethernet",
- .mode = 0555,
- .child = ether_table,
- },
-#endif
#ifdef CONFIG_INET
{
.ctl_name = NET_IPV4,
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 21dad415b896..90b4cdc0c948 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4422,6 +4422,7 @@ void selinux_complete_init(void)
/* Set up any superblocks initialized prior to the policy load. */
printk(KERN_INFO "SELinux: Setting up existing superblocks.\n");
+ spin_lock(&sb_lock);
spin_lock(&sb_security_lock);
next_sb:
if (!list_empty(&superblock_security_head)) {
@@ -4430,19 +4431,20 @@ next_sb:
struct superblock_security_struct,
list);
struct super_block *sb = sbsec->sb;
- spin_lock(&sb_lock);
sb->s_count++;
- spin_unlock(&sb_lock);
spin_unlock(&sb_security_lock);
+ spin_unlock(&sb_lock);
down_read(&sb->s_umount);
if (sb->s_root)
superblock_doinit(sb, NULL);
drop_super(sb);
+ spin_lock(&sb_lock);
spin_lock(&sb_security_lock);
list_del_init(&sbsec->list);
goto next_sb;
}
spin_unlock(&sb_security_lock);
+ spin_unlock(&sb_lock);
}
/* SELinux requires early initialization in order to label