author    Linus Torvalds <torvalds@linux-foundation.org>  2017-09-06 14:45:08 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-09-06 14:45:08 -0700
commit    aae3dbb4776e7916b6cd442d00159bea27a695c1
tree      d074c5d783a81e7e2e084b1eba77f57459da7e37  /drivers/staging/irda/drivers
parent    Merge tag 'wberr-v4.14-1' of git://git.kernel.org/pub/scm/linux/kernel/git/jlayton/linux
parent    Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Support ipv6 checksum offload in sunvnet driver, from Shannon Nelson.
 2) Move to RB-tree instead of custom AVL code in inetpeer, from Eric Dumazet.
 3) Allow generic XDP to work on virtual devices, from John Fastabend.
 4) Add bpf device maps and XDP_REDIRECT, which can be used to build arbitrary switching frameworks using XDP. From John Fastabend.
 5) Remove UFO offloads from the tree, gave us little other than bugs.
 6) Remove the IPSEC flow cache, from Florian Westphal.
 7) Support ipv6 route offload in mlxsw driver.
 8) Support VF representors in bnxt_en, from Sathya Perla.
 9) Add support for forward error correction modes to ethtool, from Vidya Sagar Ravipati.
10) Add time filter for packet scheduler action dumping, from Jamal Hadi Salim.
11) Extend the zerocopy sendmsg() used by virtio and tap to regular sockets via MSG_ZEROCOPY. From Willem de Bruijn.
12) Significantly rework value tracking in the BPF verifier, from Edward Cree.
13) Add new jump instructions to eBPF, from Daniel Borkmann.
14) Rework rtnetlink plumbing so that operations can be run without taking the RTNL semaphore. From Florian Westphal.
15) Support XDP in tap driver, from Jason Wang.
16) Add 32-bit eBPF JIT for ARM, from Shubham Bansal.
17) Add Huawei hinic ethernet driver.
18) Allow to report MD5 keys in TCP inet_diag dumps, from Ivan Delalande.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1780 commits)
  i40e: point wb_desc at the nvm_wb_desc during i40e_read_nvm_aq
  i40e: avoid NVM acquire deadlock during NVM update
  drivers: net: xgene: Remove return statement from void function
  drivers: net: xgene: Configure tx/rx delay for ACPI
  drivers: net: xgene: Read tx/rx delay for ACPI
  rocker: fix kcalloc parameter order
  rds: Fix non-atomic operation on shared flag variable
  net: sched: don't use GFP_KERNEL under spin lock
  vhost_net: correctly check tx avail during rx busy polling
  net: mdio-mux: add mdio_mux parameter to mdio_mux_init()
  rxrpc: Make service connection lookup always check for retry
  net: stmmac: Delete dead code for MDIO registration
  gianfar: Fix Tx flow control deactivation
  cxgb4: Ignore MPS_TX_INT_CAUSE[Bubble] for T6
  cxgb4: Fix pause frame count in t4_get_port_stats
  cxgb4: fix memory leak
  tun: rename generic_xdp to skb_xdp
  tun: reserve extra headroom only when XDP is set
  net: dsa: bcm_sf2: Configure IMP port TC2QOS mapping
  net: dsa: bcm_sf2: Advertise number of egress queues
  ...
Diffstat (limited to 'drivers/staging/irda/drivers')
-rw-r--r--  drivers/staging/irda/drivers/Kconfig           398
-rw-r--r--  drivers/staging/irda/drivers/Makefile           44
-rw-r--r--  drivers/staging/irda/drivers/act200l-sir.c     250
-rw-r--r--  drivers/staging/irda/drivers/actisys-sir.c     245
-rw-r--r--  drivers/staging/irda/drivers/ali-ircc.c       2218
-rw-r--r--  drivers/staging/irda/drivers/ali-ircc.h        227
-rw-r--r--  drivers/staging/irda/drivers/au1k_ir.c         989
-rw-r--r--  drivers/staging/irda/drivers/bfin_sir.c        817
-rw-r--r--  drivers/staging/irda/drivers/bfin_sir.h         93
-rw-r--r--  drivers/staging/irda/drivers/donauboe.c       1732
-rw-r--r--  drivers/staging/irda/drivers/donauboe.h        362
-rw-r--r--  drivers/staging/irda/drivers/esi-sir.c         157
-rw-r--r--  drivers/staging/irda/drivers/girbil-sir.c      252
-rw-r--r--  drivers/staging/irda/drivers/irda-usb.c       1914
-rw-r--r--  drivers/staging/irda/drivers/irda-usb.h        174
-rw-r--r--  drivers/staging/irda/drivers/irtty-sir.c       570
-rw-r--r--  drivers/staging/irda/drivers/irtty-sir.h        34
-rw-r--r--  drivers/staging/irda/drivers/kingsun-sir.c     634
-rw-r--r--  drivers/staging/irda/drivers/ks959-sir.c       912
-rw-r--r--  drivers/staging/irda/drivers/ksdazzle-sir.c    813
-rw-r--r--  drivers/staging/irda/drivers/litelink-sir.c    199
-rw-r--r--  drivers/staging/irda/drivers/ma600-sir.c       253
-rw-r--r--  drivers/staging/irda/drivers/mcp2120-sir.c     224
-rw-r--r--  drivers/staging/irda/drivers/mcs7780.c         987
-rw-r--r--  drivers/staging/irda/drivers/mcs7780.h         165
-rw-r--r--  drivers/staging/irda/drivers/nsc-ircc.c       2410
-rw-r--r--  drivers/staging/irda/drivers/nsc-ircc.h        281
-rw-r--r--  drivers/staging/irda/drivers/old_belkin-sir.c  146
-rw-r--r--  drivers/staging/irda/drivers/pxaficp_ir.c     1076
-rw-r--r--  drivers/staging/irda/drivers/sa1100_ir.c      1150
-rw-r--r--  drivers/staging/irda/drivers/sh_sir.c          810
-rw-r--r--  drivers/staging/irda/drivers/sir-dev.h         191
-rw-r--r--  drivers/staging/irda/drivers/sir_dev.c         987
-rw-r--r--  drivers/staging/irda/drivers/sir_dongle.c      133
-rw-r--r--  drivers/staging/irda/drivers/smsc-ircc2.c     3026
-rw-r--r--  drivers/staging/irda/drivers/smsc-ircc2.h      191
-rw-r--r--  drivers/staging/irda/drivers/smsc-sio.h        100
-rw-r--r--  drivers/staging/irda/drivers/stir4200.c       1134
-rw-r--r--  drivers/staging/irda/drivers/tekram-sir.c      225
-rw-r--r--  drivers/staging/irda/drivers/toim3232-sir.c    358
-rw-r--r--  drivers/staging/irda/drivers/via-ircc.c       1593
-rw-r--r--  drivers/staging/irda/drivers/via-ircc.h        846
-rw-r--r--  drivers/staging/irda/drivers/vlsi_ir.c        1872
-rw-r--r--  drivers/staging/irda/drivers/vlsi_ir.h         757
-rw-r--r--  drivers/staging/irda/drivers/w83977af.h         53
-rw-r--r--  drivers/staging/irda/drivers/w83977af_ir.c    1285
-rw-r--r--  drivers/staging/irda/drivers/w83977af_ir.h     198
47 files changed, 33485 insertions, 0 deletions
diff --git a/drivers/staging/irda/drivers/Kconfig b/drivers/staging/irda/drivers/Kconfig
new file mode 100644
index 000000000000..e070e1222733
--- /dev/null
+++ b/drivers/staging/irda/drivers/Kconfig
@@ -0,0 +1,398 @@
+menu "Infrared-port device drivers"
+ depends on IRDA!=n
+
+comment "SIR device drivers"
+
+config IRTTY_SIR
+ tristate "IrTTY (uses Linux serial driver)"
+ depends on IRDA && TTY
+ help
+ Say Y here if you want to build support for the IrTTY line
+ discipline. To compile it as a module, choose M here: the module
+ will be called irtty-sir. IrTTY makes it possible to use Linux's
+ own serial driver for all IrDA ports that are 16550 compatible.
+ Most IrDA chips are 16550 compatible so you should probably say Y
+ to this option. Using IrTTY will however limit the speed of the
+ connection to 115200 bps (IrDA SIR mode).
+
+ If unsure, say Y.
+
+config BFIN_SIR
+ tristate "Blackfin SIR on UART"
+ depends on BLACKFIN && IRDA
+ default n
+ help
+ Say Y here if you want to enable the SIR function on Blackfin UART
+ devices.
+
+ To activate this driver you can start irattach like:
+ "irattach irda0 -s"
+
+ If you say M here, it will be built as a module named bfin_sir.
+
+ Note that you need to turn off the corresponding serial driver for
+ SIR to be able to use that UART.
+
+config BFIN_SIR0
+ bool "Blackfin SIR on UART0"
+ depends on BFIN_SIR && !SERIAL_BFIN_UART0
+
+config BFIN_SIR1
+ bool "Blackfin SIR on UART1"
+ depends on BFIN_SIR && !SERIAL_BFIN_UART1 && (!BF531 && !BF532 && !BF533 && !BF561)
+
+config BFIN_SIR2
+ bool "Blackfin SIR on UART2"
+ depends on BFIN_SIR && !SERIAL_BFIN_UART2 && (BF54x || BF538 || BF539)
+
+config BFIN_SIR3
+ bool "Blackfin SIR on UART3"
+ depends on BFIN_SIR && !SERIAL_BFIN_UART3 && (BF54x)
+
+choice
+ prompt "SIR Mode"
+ depends on BFIN_SIR
+ default SIR_BFIN_DMA
+
+config SIR_BFIN_DMA
+ bool "DMA mode"
+ depends on !DMA_UNCACHED_NONE
+
+config SIR_BFIN_PIO
+ bool "PIO mode"
+endchoice
+
+config SH_SIR
+ tristate "SuperH SIR on UART"
+ depends on IRDA && SUPERH && \
+ (CPU_SUBTYPE_SH7722 || CPU_SUBTYPE_SH7723 || \
+ CPU_SUBTYPE_SH7724)
+ default n
+ help
+ Say Y here if you want to enable the SIR function on SuperH UART
+ devices.
+
+comment "Dongle support"
+
+config DONGLE
+ bool "Serial dongle support"
+ depends on IRTTY_SIR
+ help
+ Say Y here if you have an infrared device that connects to your
+ computer's serial port. These devices are called dongles. Then say Y
+ or M to the driver for your particular dongle below.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about serial dongles.
+
+config ESI_DONGLE
+ tristate "ESI JetEye PC dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Extended Systems
+ JetEye PC dongle. To compile it as a module, choose M here. The ESI
+ dongle attaches to the normal 9-pin serial port connector, and can
+ currently only be used by IrTTY. To activate support for ESI
+ dongles you will have to start irattach like this:
+ "irattach -d esi".
+
+config ACTISYS_DONGLE
+ tristate "ACTiSYS IR-220L and IR220L+ dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the ACTiSYS IR-220L and
+ IR220L+ dongles. To compile it as a module, choose M here. The
+ ACTiSYS dongles attach to the normal 9-pin serial port connector,
+ and can currently only be used by IrTTY. To activate support for
+ ACTiSYS dongles you will have to start irattach like this:
+ "irattach -d actisys" or "irattach -d actisys+".
+
+config TEKRAM_DONGLE
+ tristate "Tekram IrMate 210B dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Tekram IrMate 210B
+ dongle. To compile it as a module, choose M here. The Tekram dongle
+ attaches to the normal 9-pin serial port connector, and can
+ currently only be used by IrTTY. To activate support for Tekram
+ dongles you will have to start irattach like this:
+ "irattach -d tekram".
+
+config TOIM3232_DONGLE
+ tristate "TOIM3232 IrDa dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Vishay/Temic
+ TOIM3232 and TOIM4232 based dongles.
+ To compile it as a module, choose M here.
+
+config LITELINK_DONGLE
+ tristate "Parallax LiteLink dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Parallax Litelink
+ dongle. To compile it as a module, choose M here. The Parallax
+ dongle attaches to the normal 9-pin serial port connector, and can
+ currently only be used by IrTTY. To activate support for Parallax
+ dongles you will have to start irattach like this:
+ "irattach -d litelink".
+
+config MA600_DONGLE
+ tristate "Mobile Action MA600 dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Mobile Action MA600
+ dongle. To compile it as a module, choose M here. The MA600 dongle
+ attaches to the normal 9-pin serial port connector, and can
+ currently only be used by IrTTY. The driver should also support
+ the MA620 USB version of the dongle, if the integrated USB-to-RS232
+ converter is supported by usbserial. To activate support for
+ MA600 dongle you will have to start irattach like this:
+ "irattach -d ma600".
+
+config GIRBIL_DONGLE
+ tristate "Greenwich GIrBIL dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Greenwich GIrBIL
+ dongle. If you want to compile it as a module, choose M here.
+ The Greenwich dongle attaches to the normal 9-pin serial port
+ connector, and can currently only be used by IrTTY. To activate
+ support for Greenwich dongles you will have to start irattach
+ like this: "irattach -d girbil".
+
+config MCP2120_DONGLE
+ tristate "Microchip MCP2120"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Microchip MCP2120
+ dongle. If you want to compile it as a module, choose M here.
+ The MCP2120 dongle attaches to the normal 9-pin serial port
+ connector, and can currently only be used by IrTTY. To activate
+ support for MCP2120 dongles you will have to start irattach
+ like this: "irattach -d mcp2120".
+
+ You must build this dongle yourself. For more information see:
+ <http://www.eyetap.org/~tangf/irda_sir_linux.html>
+
+config OLD_BELKIN_DONGLE
+ tristate "Old Belkin dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Adaptec Airport 1000
+ and 2000 dongles. If you want to compile it as a module, choose
+ M here. Some information is contained in the comments
+ at the top of <file:drivers/net/irda/old_belkin-sir.c>.
+
+config ACT200L_DONGLE
+ tristate "ACTiSYS IR-200L dongle"
+ depends on IRTTY_SIR && DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the ACTiSYS IR-200L
+ dongle. If you want to compile it as a module, choose M here.
+ The ACTiSYS IR-200L dongle attaches to the normal 9-pin serial
+ port connector, and can currently only be used by IrTTY.
+ To activate support for ACTiSYS IR-200L dongle you will have to
+ start irattach like this: "irattach -d act200l".
+
+config KINGSUN_DONGLE
+ tristate "KingSun/DonShine DS-620 IrDA-USB dongle"
+ depends on IRDA && USB
+ help
+ Say Y or M here if you want to build support for the KingSun/DonShine
+ DS-620 IrDA-USB bridge device driver.
+
+ This USB bridge does not conform to the IrDA-USB device class
+ specification, and therefore needs its own specific driver. This
+ dongle supports SIR speed only (9600 bps).
+
+ To compile it as a module, choose M here: the module will be called
+ kingsun-sir.
+
+config KSDAZZLE_DONGLE
+ tristate "KingSun Dazzle IrDA-USB dongle"
+ depends on IRDA && USB
+ help
+ Say Y or M here if you want to build support for the KingSun Dazzle
+ IrDA-USB bridge device driver.
+
+ This USB bridge does not conform to the IrDA-USB device class
+ specification, and therefore needs its own specific driver. This
+ dongle supports SIR speeds only (9600 through 115200 bps).
+
+ To compile it as a module, choose M here: the module will be called
+ ksdazzle-sir.
+
+config KS959_DONGLE
+ tristate "KingSun KS-959 IrDA-USB dongle"
+ depends on IRDA && USB
+ help
+ Say Y or M here if you want to build support for the KingSun KS-959
+ IrDA-USB bridge device driver.
+
+ This USB bridge does not conform to the IrDA-USB device class
+ specification, and therefore needs its own specific driver. This
+ dongle supports SIR speeds only (9600 through 57600 bps).
+
+ To compile it as a module, choose M here: the module will be called
+ ks959-sir.
+
+comment "FIR device drivers"
+
+config USB_IRDA
+ tristate "IrDA USB dongles"
+ depends on IRDA && USB
+ select FW_LOADER
+ ---help---
+ Say Y here if you want to build support for the USB IrDA FIR Dongle
+ device driver. To compile it as a module, choose M here: the module
+ will be called irda-usb. IrDA-USB supports the various IrDA USB
+ dongles available and most of their peculiarities. Those dongles
+ plug into the USB port of your computer, are plug and play, and
+ support SIR and FIR (4Mbps) speeds. On the other hand, those
+ dongles tend to be less efficient than a FIR chipset.
+
+ Please note that the driver is still experimental. And of course,
+ you will need both USB and IrDA support in your kernel...
+
+config SIGMATEL_FIR
+ tristate "SigmaTel STIr4200 bridge"
+ depends on IRDA && USB
+ select CRC32
+ ---help---
+ Say Y here if you want to build support for the SigmaTel STIr4200
+ USB IrDA FIR bridge device driver.
+
+ USB bridges based on the SigmaTel STIr4200 don't conform to the
+ IrDA-USB device class specification, and therefore need their
+ own specific driver. Those dongles support SIR and FIR (4Mbps)
+ speeds.
+
+ To compile it as a module, choose M here: the module will be called
+ stir4200.
+
+config NSC_FIR
+ tristate "NSC PC87108/PC87338"
+ depends on IRDA && ISA_DMA_API
+ help
+ Say Y here if you want to build support for the NSC PC87108 and
+ PC87338 IrDA chipsets. This driver supports SIR,
+ MIR and FIR (4Mbps) speeds.
+
+ To compile it as a module, choose M here: the module will be called
+ nsc-ircc.
+
+config WINBOND_FIR
+ tristate "Winbond W83977AF (IR)"
+ depends on IRDA && ISA_DMA_API
+ help
+ Say Y here if you want to build IrDA support for the Winbond
+ W83977AF super-io chipset. This driver should be used for the IrDA
+ chipset in the Corel NetWinder. The driver supports SIR, MIR and
+ FIR (4Mbps) speeds.
+
+ To compile it as a module, choose M here: the module will be called
+ w83977af_ir.
+
+config TOSHIBA_FIR
+ tristate "Toshiba Type-O IR Port"
+ depends on IRDA && PCI && !64BIT && VIRT_TO_BUS
+ help
+ Say Y here if you want to build support for the Toshiba Type-O IR
+ and Donau oboe chipsets. These chipsets are used by the Toshiba
+ Libretto 100/110CT, Tecra 8100, Portege 7020 and many more laptops.
+ To compile it as a module, choose M here: the module will be called
+ donauboe.
+
+config AU1000_FIR
+ tristate "Alchemy IrDA SIR/FIR"
+ depends on IRDA && MIPS_ALCHEMY
+ help
+ Say Y/M here to build support for the IrDA peripheral on the
+ Alchemy Au1000 and Au1100 SoCs.
+ Say M to build a module; it will be called au1k_ir.ko
+
+config SMC_IRCC_FIR
+ tristate "SMSC IrCC"
+ depends on IRDA && ISA_DMA_API
+ help
+ Say Y here if you want to build support for the SMC Infrared
+ Communications Controller. It is used in a wide variety of
+ laptops (Fujitsu, Sony, Compaq and some Toshiba).
+ To compile it as a module, choose M here: the module will be called
+ smsc-ircc2.
+
+config ALI_FIR
+ tristate "ALi M5123 FIR"
+ depends on IRDA && ISA_DMA_API
+ help
+ Say Y here if you want to build support for the ALi M5123 FIR
+ Controller. The ALi M5123 FIR Controller is embedded in ALi M1543C,
+ M1535, M1535D, M1535+, M1535D+ South Bridge. This driver supports
+ SIR, MIR and FIR (4Mbps) speeds.
+
+ To compile it as a module, choose M here: the module will be called
+ ali-ircc.
+
+config VLSI_FIR
+ tristate "VLSI 82C147 SIR/MIR/FIR"
+ depends on IRDA && PCI
+ help
+ Say Y here if you want to build support for the VLSI 82C147
+ PCI-IrDA Controller. This controller is used by the HP OmniBook 800
+ and 5500 notebooks. The driver provides support for SIR, MIR and
+ FIR (4Mbps) speeds.
+
+ To compile it as a module, choose M here: the module will be called
+ vlsi_ir.
+
+config SA1100_FIR
+ tristate "SA1100 Internal IR"
+ depends on ARCH_SA1100 && IRDA && DMA_SA11X0
+
+config VIA_FIR
+ tristate "VIA VT8231/VT1211 SIR/MIR/FIR"
+ depends on IRDA && ISA_DMA_API
+ help
+ Say Y here if you want to build support for the VIA VT8231
+ and VIA VT1211 IrDA controllers, found on the motherboards using
+ those VIA chipsets. To use this controller, you will need
+ to plug a specific 5-pin FIR IrDA dongle into the specific
+ motherboard connector. The driver provides support for SIR, MIR
+ and FIR (4Mbps) speeds.
+
+ You will need to specify the 'dongle_id' module parameter to
+ indicate the FIR dongle attached to the controller.
+
+ To compile it as a module, choose M here: the module will be called
+ via-ircc.
+
+config PXA_FICP
+ tristate "Intel PXA2xx Internal FICP"
+ depends on ARCH_PXA && IRDA
+ help
+ Say Y or M here if you want to build support for the PXA2xx
+ built-in IrDA interface, which can support both SIR and FIR.
+ This driver relies on platform specific helper routines so
+ available capabilities may vary from one PXA2xx target to
+ another.
+
+config MCS_FIR
+ tristate "MosChip MCS7780 IrDA-USB dongle"
+ depends on IRDA && USB
+ select CRC32
+ help
+ Say Y or M here if you want to build support for the MosChip
+ MCS7780 IrDA-USB bridge device driver.
+
+ USB bridges based on the MosChip MCS7780 don't conform to the
+ IrDA-USB device class specification, and therefore need their
+ own specific driver. Those dongles support SIR and FIR (4Mbps)
+ speeds.
+
+ To compile it as a module, choose M here: the module will be called
+ mcs7780.
+
+endmenu
+
diff --git a/drivers/staging/irda/drivers/Makefile b/drivers/staging/irda/drivers/Makefile
new file mode 100644
index 000000000000..e2901b135528
--- /dev/null
+++ b/drivers/staging/irda/drivers/Makefile
@@ -0,0 +1,44 @@
+#
+# Makefile for the Linux IrDA infrared port device drivers.
+#
+# 9 Aug 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+#
+
+subdir-ccflags-y += -I$(srctree)/drivers/staging/irda/include
+
+# FIR drivers
+obj-$(CONFIG_USB_IRDA) += irda-usb.o
+obj-$(CONFIG_SIGMATEL_FIR) += stir4200.o
+obj-$(CONFIG_NSC_FIR) += nsc-ircc.o
+obj-$(CONFIG_WINBOND_FIR) += w83977af_ir.o
+obj-$(CONFIG_SA1100_FIR) += sa1100_ir.o
+obj-$(CONFIG_TOSHIBA_FIR) += donauboe.o
+obj-$(CONFIG_SMC_IRCC_FIR) += smsc-ircc2.o
+obj-$(CONFIG_ALI_FIR) += ali-ircc.o
+obj-$(CONFIG_VLSI_FIR) += vlsi_ir.o
+obj-$(CONFIG_VIA_FIR) += via-ircc.o
+obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o
+obj-$(CONFIG_MCS_FIR) += mcs7780.o
+obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
+# SIR drivers
+obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
+obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
+obj-$(CONFIG_SH_SIR) += sh_sir.o
+# dongle drivers for SIR drivers
+obj-$(CONFIG_ESI_DONGLE) += esi-sir.o
+obj-$(CONFIG_TEKRAM_DONGLE) += tekram-sir.o
+obj-$(CONFIG_ACTISYS_DONGLE) += actisys-sir.o
+obj-$(CONFIG_LITELINK_DONGLE) += litelink-sir.o
+obj-$(CONFIG_GIRBIL_DONGLE) += girbil-sir.o
+obj-$(CONFIG_OLD_BELKIN_DONGLE) += old_belkin-sir.o
+obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o
+obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
+obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
+obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
+obj-$(CONFIG_KINGSUN_DONGLE) += kingsun-sir.o
+obj-$(CONFIG_KSDAZZLE_DONGLE) += ksdazzle-sir.o
+obj-$(CONFIG_KS959_DONGLE) += ks959-sir.o
+
+# The SIR helper module
+sir-dev-objs := sir_dev.o sir_dongle.o
diff --git a/drivers/staging/irda/drivers/act200l-sir.c b/drivers/staging/irda/drivers/act200l-sir.c
new file mode 100644
index 000000000000..e8917511e1aa
--- /dev/null
+++ b/drivers/staging/irda/drivers/act200l-sir.c
@@ -0,0 +1,250 @@
+/*********************************************************************
+ *
+ * Filename: act200l.c
+ * Version: 0.8
+ * Description: Implementation for the ACTiSYS ACT-IR200L dongle
+ * Status: Experimental.
+ * Author: SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>
+ * Created at: Fri Aug 3 17:35:42 2001
+ * Modified at: Fri Aug 17 10:22:40 2001
+ * Modified by: SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>
+ *
+ * Copyright (c) 2001 SHIMIZU Takuya, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int act200l_reset(struct sir_dev *dev);
+static int act200l_open(struct sir_dev *dev);
+static int act200l_close(struct sir_dev *dev);
+static int act200l_change_speed(struct sir_dev *dev, unsigned speed);
+
+/* Register 0: Control register #1 */
+#define ACT200L_REG0 0x00
+#define ACT200L_TXEN 0x01 /* Enable transmitter */
+#define ACT200L_RXEN 0x02 /* Enable receiver */
+
+/* Register 1: Control register #2 */
+#define ACT200L_REG1 0x10
+#define ACT200L_LODB 0x01 /* Load new baud rate count value */
+#define ACT200L_WIDE 0x04 /* Expand the maximum allowable pulse */
+
+/* Register 4: Output Power register */
+#define ACT200L_REG4 0x40
+#define ACT200L_OP0 0x01 /* Enable LED1C output */
+#define ACT200L_OP1 0x02 /* Enable LED2C output */
+#define ACT200L_BLKR 0x04
+
+/* Register 5: Receive Mode register */
+#define ACT200L_REG5 0x50
+#define ACT200L_RWIDL 0x01 /* fixed 1.6us pulse mode */
+
+/* Register 6: Receive Sensitivity register #1 */
+#define ACT200L_REG6 0x60
+#define ACT200L_RS0 0x01 /* receive threshold bit 0 */
+#define ACT200L_RS1 0x02 /* receive threshold bit 1 */
+
+/* Register 7: Receive Sensitivity register #2 */
+#define ACT200L_REG7 0x70
+#define ACT200L_ENPOS 0x04 /* Ignore the falling edge */
+
+/* Register 8,9: Baud Rate Divider register #1,#2 */
+#define ACT200L_REG8 0x80
+#define ACT200L_REG9 0x90
+
+#define ACT200L_2400 0x5f
+#define ACT200L_9600 0x17
+#define ACT200L_19200 0x0b
+#define ACT200L_38400 0x05
+#define ACT200L_57600 0x03
+#define ACT200L_115200 0x01
+
+/* Register 13: Control register #3 */
+#define ACT200L_REG13 0xd0
+#define ACT200L_SHDW 0x01 /* Enable access to shadow registers */
+
+/* Register 15: Status register */
+#define ACT200L_REG15 0xf0
+
+/* Register 21: Control register #4 */
+#define ACT200L_REG21 0x50
+#define ACT200L_EXCK 0x02 /* Disable clock output driver */
+#define ACT200L_OSCL 0x04 /* oscillator in low power, medium accuracy mode */
+
+static struct dongle_driver act200l = {
+ .owner = THIS_MODULE,
+ .driver_name = "ACTiSYS ACT-IR200L",
+ .type = IRDA_ACT200L_DONGLE,
+ .open = act200l_open,
+ .close = act200l_close,
+ .reset = act200l_reset,
+ .set_speed = act200l_change_speed,
+};
+
+static int __init act200l_sir_init(void)
+{
+ return irda_register_dongle(&act200l);
+}
+
+static void __exit act200l_sir_cleanup(void)
+{
+ irda_unregister_dongle(&act200l);
+}
+
+static int act200l_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ /* Power on the dongle */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Set the speeds we can accept */
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x03;
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int act200l_close(struct sir_dev *dev)
+{
+ /* Power off the dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function act200l_change_speed (dev, speed)
+ *
+ * Set the speed for the ACTiSYS ACT-IR200L type dongle.
+ *
+ */
+static int act200l_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ u8 control[3];
+ int ret = 0;
+
+ /* Clear DTR and set RTS to enter command mode */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
+ switch (speed) {
+ default:
+ ret = -EINVAL;
+ /* fall through */
+ case 9600:
+ control[0] = ACT200L_REG8 | (ACT200L_9600 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_9600 >> 4) & 0x0f);
+ break;
+ case 19200:
+ control[0] = ACT200L_REG8 | (ACT200L_19200 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_19200 >> 4) & 0x0f);
+ break;
+ case 38400:
+ control[0] = ACT200L_REG8 | (ACT200L_38400 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_38400 >> 4) & 0x0f);
+ break;
+ case 57600:
+ control[0] = ACT200L_REG8 | (ACT200L_57600 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_57600 >> 4) & 0x0f);
+ break;
+ case 115200:
+ control[0] = ACT200L_REG8 | (ACT200L_115200 & 0x0f);
+ control[1] = ACT200L_REG9 | ((ACT200L_115200 >> 4) & 0x0f);
+ break;
+ }
+ control[2] = ACT200L_REG1 | ACT200L_LODB | ACT200L_WIDE;
+
+ /* Write control bytes */
+ sirdev_raw_write(dev, control, 3);
+ msleep(5);
+
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ dev->speed = speed;
+ return ret;
+}
+
+/*
+ * Function act200l_reset (driver)
+ *
+ * Reset the ACTiSYS ACT-IR200L type dongle.
+ */
+
+#define ACT200L_STATE_WAIT1_RESET (SIRDEV_STATE_DONGLE_RESET+1)
+#define ACT200L_STATE_WAIT2_RESET (SIRDEV_STATE_DONGLE_RESET+2)
+
+static int act200l_reset(struct sir_dev *dev)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ static const u8 control[9] = {
+ ACT200L_REG15,
+ ACT200L_REG13 | ACT200L_SHDW,
+ ACT200L_REG21 | ACT200L_EXCK | ACT200L_OSCL,
+ ACT200L_REG13,
+ ACT200L_REG7 | ACT200L_ENPOS,
+ ACT200L_REG6 | ACT200L_RS0 | ACT200L_RS1,
+ ACT200L_REG5 | ACT200L_RWIDL,
+ ACT200L_REG4 | ACT200L_OP0 | ACT200L_OP1 | ACT200L_BLKR,
+ ACT200L_REG0 | ACT200L_TXEN | ACT200L_RXEN
+ };
+ int ret = 0;
+
+ switch (state) {
+ case SIRDEV_STATE_DONGLE_RESET:
+ /* Reset the dongle : set RTS low for 25 ms */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+ state = ACT200L_STATE_WAIT1_RESET;
+ delay = 50;
+ break;
+
+ case ACT200L_STATE_WAIT1_RESET:
+ /* Clear DTR and set RTS to enter command mode */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
+ udelay(25); /* better wait for some short while */
+
+ /* Write control bytes */
+ sirdev_raw_write(dev, control, sizeof(control));
+ state = ACT200L_STATE_WAIT2_RESET;
+ delay = 15;
+ break;
+
+ case ACT200L_STATE_WAIT2_RESET:
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ dev->speed = 9600;
+ break;
+ default:
+ net_err_ratelimited("%s(), unknown state %d\n",
+ __func__, state);
+ ret = -1;
+ break;
+ }
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+MODULE_AUTHOR("SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>");
+MODULE_DESCRIPTION("ACTiSYS ACT-IR200L dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-10"); /* IRDA_ACT200L_DONGLE */
+
+module_init(act200l_sir_init);
+module_exit(act200l_sir_cleanup);
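The speed table in act200l_change_speed() above splits each baud-rate divider value into two nibbles: the low nibble is written into baud-rate divider register 8 and the high nibble into register 9, followed by a register-1 write that latches the new count before DTR/RTS are toggled back to normal mode. As a minimal sketch of that byte composition only (a plain userspace program, not kernel code; the constants are copied from the file above):

#include <stdio.h>

/* Constants copied from act200l-sir.c above */
#define ACT200L_REG8 0x80 /* Baud Rate Divider register #1 (low nibble) */
#define ACT200L_REG9 0x90 /* Baud Rate Divider register #2 (high nibble) */
#define ACT200L_REG1 0x10 /* Control register #2 */
#define ACT200L_LODB 0x01 /* Load new baud rate count value */
#define ACT200L_WIDE 0x04 /* Expand the maximum allowable pulse */
#define ACT200L_9600 0x17 /* divider value for 9600 bps */

int main(void)
{
	unsigned char control[3];

	/* Same nibble split as act200l_change_speed() performs */
	control[0] = ACT200L_REG8 | (ACT200L_9600 & 0x0f);
	control[1] = ACT200L_REG9 | ((ACT200L_9600 >> 4) & 0x0f);
	control[2] = ACT200L_REG1 | ACT200L_LODB | ACT200L_WIDE;

	printf("control bytes: %02x %02x %02x\n",
	       control[0], control[1], control[2]);
	return 0;
}

For 9600 bps (divider 0x17) this yields the bytes 0x87, 0x91, 0x15, which is what the driver writes with sirdev_raw_write() while in command mode.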
diff --git a/drivers/staging/irda/drivers/actisys-sir.c b/drivers/staging/irda/drivers/actisys-sir.c
new file mode 100644
index 000000000000..e224b8b99517
--- /dev/null
+++ b/drivers/staging/irda/drivers/actisys-sir.c
@@ -0,0 +1,245 @@
+/*********************************************************************
+ *
+ * Filename: actisys.c
+ * Version: 1.1
+ * Description: Implementation for the ACTiSYS IR-220L and IR-220L+
+ * dongles
+ * Status: Beta.
+ * Authors: Dag Brattli <dagb@cs.uit.no> (initially)
+ * Jean Tourrilhes <jt@hpl.hp.com> (new version)
+ * Martin Diehl <mad@mdiehl.de> (new version for sir_dev)
+ * Created at: Wed Oct 21 20:02:35 1998
+ * Modified at: Sun Oct 27 22:02:13 2002
+ * Modified by: Martin Diehl <mad@mdiehl.de>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1999 Jean Tourrilhes
+ * Copyright (c) 2002 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+/*
+ * Changelog
+ *
+ * 0.8 -> 0.9999 - Jean
+ * o New initialisation procedure : much safer and correct
+ * o New procedure the change speed : much faster and simpler
+ * o Other cleanups & comments
+ * Thanks to Lichen Wang @ Actisys for his excellent help...
+ *
+ * 1.0 -> 1.1 - Martin Diehl
+ * modified for new sir infrastructure
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+/*
+ * Define the timing of the pulses we send to the dongle (to reset it, and
+ * to toggle speeds). Basically, the limit here is the propagation speed of
+ * the signals through the serial port, the dongle being much faster. Any
+ * serial port supports 115 kb/s, so we are sure that pulses 8.5 us wide can
+ * go through cleanly. If you are on the wild side, you can try to lower
+ * this value (Actisys recommended 2 us to me, and 0 us works for me on a P233!)
+ */
+#define MIN_DELAY 10 /* 10 us to be on the conservative side */
+
+static int actisys_open(struct sir_dev *);
+static int actisys_close(struct sir_dev *);
+static int actisys_change_speed(struct sir_dev *, unsigned);
+static int actisys_reset(struct sir_dev *);
+
+/* These are the baudrates supported, in the order available */
+/* Note : the 220L doesn't support 38400, but we will fix that below */
+static unsigned baud_rates[] = { 9600, 19200, 57600, 115200, 38400 };
+
+#define MAX_SPEEDS ARRAY_SIZE(baud_rates)
+
+static struct dongle_driver act220l = {
+ .owner = THIS_MODULE,
+ .driver_name = "Actisys ACT-220L",
+ .type = IRDA_ACTISYS_DONGLE,
+ .open = actisys_open,
+ .close = actisys_close,
+ .reset = actisys_reset,
+ .set_speed = actisys_change_speed,
+};
+
+static struct dongle_driver act220l_plus = {
+ .owner = THIS_MODULE,
+ .driver_name = "Actisys ACT-220L+",
+ .type = IRDA_ACTISYS_PLUS_DONGLE,
+ .open = actisys_open,
+ .close = actisys_close,
+ .reset = actisys_reset,
+ .set_speed = actisys_change_speed,
+};
+
+static int __init actisys_sir_init(void)
+{
+ int ret;
+
+ /* First, register an Actisys 220L dongle */
+ ret = irda_register_dongle(&act220l);
+ if (ret < 0)
+ return ret;
+
+ /* Now, register an Actisys 220L+ dongle */
+ ret = irda_register_dongle(&act220l_plus);
+ if (ret < 0) {
+ irda_unregister_dongle(&act220l);
+ return ret;
+ }
+ return 0;
+}
+
+static void __exit actisys_sir_cleanup(void)
+{
+ /* We have to remove both dongles */
+ irda_unregister_dongle(&act220l_plus);
+ irda_unregister_dongle(&act220l);
+}
+
+static int actisys_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Set the speeds we can accept */
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+
+ /* Remove support for 38400 if this is not a 220L+ dongle */
+ if (dev->dongle_drv->type == IRDA_ACTISYS_DONGLE)
+ qos->baud_rate.bits &= ~IR_38400;
+
+ qos->min_turn_time.bits = 0x7f; /* Needs 0.01 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int actisys_close(struct sir_dev *dev)
+{
+ /* Power off the dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function actisys_change_speed (task)
+ *
+ * Change speed of the ACTiSYS IR-220L and IR-220L+ type IrDA dongles.
+ * To cycle through the available baud rates, pulse RTS low for a few us.
+ *
+ * First, we reset the dongle to always start from a known state.
+ * Then, we cycle through the speeds by pulsing RTS low and then up.
+ * The dongle allows us to pulse quite fast, so we can set the speed in one go,
+ * which is much faster (< 100 us) and less complex than what is found
+ * in some other dongle drivers...
+ * Note that even if the new speed is the same as the current speed,
+ * we reassert the speed. This makes sure that things are all right,
+ * and it's fast anyway...
+ * By the way, this function will work for both type of dongles,
+ * because the additional speed is at the end of the sequence...
+ */
+static int actisys_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ int ret = 0;
+ int i = 0;
+
+ pr_debug("%s(), speed=%d (was %d)\n", __func__, speed, dev->speed);
+
+ /* dongle was already reset by the irda_request state machine,
+ * we are in a known state (dongle default)
+ */
+
+ /*
+ * Now, we can set the speed requested. Send RTS pulses until we
+ * reach the target speed
+ */
+ for (i = 0; i < MAX_SPEEDS; i++) {
+ if (speed == baud_rates[i]) {
+ dev->speed = speed;
+ break;
+ }
+ /* Set RTS low for 10 us */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+ udelay(MIN_DELAY);
+
+ /* Set RTS high for 10 us */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ udelay(MIN_DELAY);
+ }
+
+ /* Check if life is sweet... */
+ if (i >= MAX_SPEEDS) {
+ actisys_reset(dev);
+ ret = -EINVAL; /* This should not happen */
+ }
+
+ /* Basta lavoro, on se casse d'ici... */
+ return ret;
+}
+
+/*
+ * Function actisys_reset (task)
+ *
+ * Reset the Actisys type dongle. Warning, this function must only be
+ * called with a process context!
+ *
+ * We need to do two things in this function :
+ * o first make sure that the dongle is in a state where it can operate
+ * o second put the dongle in a known state
+ *
+ * The dongle is powered off the RTS and DTR lines. In the dongle, there
+ * is a big capacitor to accommodate the current spikes. This capacitor
+ * takes at least 50 ms to be charged. In theory, the BIOS sets those lines
+ * up, so by the time we arrive here we should be set. It doesn't hurt
+ * to be on the conservative side, so we will wait...
+ * <Martin : move above comment to irda_config_fsm>
+ * Then, we set the speed to 9600 b/s to get into a known state (see
+ * change_speed for details). It is needed because the IrDA stack
+ * has tried to set the speed immediately after our first return,
+ * that is, before we can be sure the dongle is up and running.
+ */
+
+static int actisys_reset(struct sir_dev *dev)
+{
+ /* Reset the dongle : set DTR low for 10 us */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+ udelay(MIN_DELAY);
+
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ dev->speed = 9600; /* That's the default */
+
+ return 0;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no> - Jean Tourrilhes <jt@hpl.hp.com>");
+MODULE_DESCRIPTION("ACTiSYS IR-220L and IR-220L+ dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-2"); /* IRDA_ACTISYS_DONGLE */
+MODULE_ALIAS("irda-dongle-3"); /* IRDA_ACTISYS_PLUS_DONGLE */
+
+module_init(actisys_sir_init);
+module_exit(actisys_sir_cleanup);
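actisys_change_speed() above walks the baud_rates[] table and pulses RTS low/high once per non-matching entry, so the number of pulses sent equals the index of the target speed in the table; the 220L's unsupported 38400 entry sits last, which is why the same loop serves both dongle types. A small standalone sketch of that pulse count, assuming a userspace build with the same table (pulses_for_speed is a hypothetical helper, not part of the driver):

#include <stdio.h>

/* Same table and ordering as actisys-sir.c above; the dongle powers up at 9600 */
static const unsigned int baud_rates[] = { 9600, 19200, 57600, 115200, 38400 };
#define MAX_SPEEDS (sizeof(baud_rates) / sizeof(baud_rates[0]))

/* How many RTS pulses the driver's loop would issue, or -1 if unsupported */
static int pulses_for_speed(unsigned int speed)
{
	unsigned int i;

	for (i = 0; i < MAX_SPEEDS; i++) {
		if (speed == baud_rates[i])
			return (int)i; /* i pulses move the dongle from 9600 to baud_rates[i] */
	}
	return -1; /* the driver would reset the dongle and return -EINVAL here */
}

int main(void)
{
	printf("57600 bps: %d pulses\n", pulses_for_speed(57600));
	printf("38400 bps: %d pulses\n", pulses_for_speed(38400));
	printf("56000 bps: %d pulses\n", pulses_for_speed(56000));
	return 0;
}

pulses_for_speed(57600) returns 2 and pulses_for_speed(38400) returns 4; an unsupported rate returns -1, matching the driver path that calls actisys_reset() and returns -EINVAL.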
diff --git a/drivers/staging/irda/drivers/ali-ircc.c b/drivers/staging/irda/drivers/ali-ircc.c
new file mode 100644
index 000000000000..35f198d83701
--- /dev/null
+++ b/drivers/staging/irda/drivers/ali-ircc.c
@@ -0,0 +1,2218 @@
+/*********************************************************************
+ *
+ * Filename: ali-ircc.c
+ * Version: 0.5
+ * Description: Driver for the ALI M1535D and M1543C FIR Controller
+ * Status: Experimental.
+ * Author: Benjamin Kong <benjamin_kong@ali.com.tw>
+ * Created at: 2000/10/16 03:46PM
+ * Modified at: 2001/1/3 02:55PM
+ * Modified by: Benjamin Kong <benjamin_kong@ali.com.tw>
+ * Modified at: 2003/11/6, adding support for the ALi M1563 south-bridge chipset
+ * Modified by: Clear Zhang <clear_zhang@ali.com.tw>
+ *
+ * Copyright (c) 2000 Benjamin Kong <benjamin_kong@ali.com.tw>
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/gfp.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/rtnetlink.h>
+#include <linux/serial_reg.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#include <net/irda/wrapper.h>
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include "ali-ircc.h"
+
+#define CHIP_IO_EXTENT 8
+#define BROKEN_DONGLE_ID
+
+#define ALI_IRCC_DRIVER_NAME "ali-ircc"
+
+/* Power Management */
+static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state);
+static int ali_ircc_resume(struct platform_device *dev);
+
+static struct platform_driver ali_ircc_driver = {
+ .suspend = ali_ircc_suspend,
+ .resume = ali_ircc_resume,
+ .driver = {
+ .name = ALI_IRCC_DRIVER_NAME,
+ },
+};
+
+/* Module parameters */
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+
+/* Use BIOS settings by default, but the user may supply module parameters */
+static unsigned int io[] = { ~0, ~0, ~0, ~0 };
+static unsigned int irq[] = { 0, 0, 0, 0 };
+static unsigned int dma[] = { 0, 0, 0, 0 };
+
+static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info);
+static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info);
+static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info);
+
+/* These are the currently known ALi south-bridge chipsets; the only difference
+ * is that the M1543C doesn't support the HP HSDL-3600
+ */
+static ali_chip_t chips[] =
+{
+ { "M1543", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x43, ali_ircc_probe_53, ali_ircc_init_43 },
+ { "M1535", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x53, ali_ircc_probe_53, ali_ircc_init_53 },
+ { "M1563", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x63, ali_ircc_probe_53, ali_ircc_init_53 },
+ { NULL }
+};
+
+/* Max 4 instances for now */
+static struct ali_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };
+
+/* Dongle Types */
+static char *dongle_types[] = {
+ "TFDS6000",
+ "HP HSDL-3600",
+ "HP HSDL-1100",
+ "No dongle connected",
+};
+
+/* Some prototypes */
+static int ali_ircc_open(int i, chipio_t *info);
+
+static int ali_ircc_close(struct ali_ircc_cb *self);
+
+static int ali_ircc_setup(chipio_t *info);
+static int ali_ircc_is_receiving(struct ali_ircc_cb *self);
+static int ali_ircc_net_open(struct net_device *dev);
+static int ali_ircc_net_close(struct net_device *dev);
+static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud);
+
+/* SIR function */
+static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self);
+static void ali_ircc_sir_receive(struct ali_ircc_cb *self);
+static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self);
+static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len);
+static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed);
+
+/* FIR function */
+static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 speed);
+static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self);
+static int ali_ircc_dma_receive(struct ali_ircc_cb *self);
+static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self);
+static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self);
+static void ali_ircc_dma_xmit(struct ali_ircc_cb *self);
+
+/* My Function */
+static int ali_ircc_read_dongle_id (int i, chipio_t *info);
+static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed);
+
+/* ALi chip function */
+static void SIR2FIR(int iobase);
+static void FIR2SIR(int iobase);
+static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable);
+
+/*
+ * Function ali_ircc_init ()
+ *
+ * Initialize chip. Find out what kinds of chips we are dealing with
+ * and their configuration registers address
+ */
+static int __init ali_ircc_init(void)
+{
+ ali_chip_t *chip;
+ chipio_t info;
+ int ret;
+ int cfg, cfg_base;
+ int reg, revision;
+ int i = 0;
+
+ ret = platform_driver_register(&ali_ircc_driver);
+ if (ret) {
+ net_err_ratelimited("%s, Can't register driver!\n",
+ ALI_IRCC_DRIVER_NAME);
+ return ret;
+ }
+
+ ret = -ENODEV;
+
+ /* Probe for all the ALi chipsets we know about */
+ for (chip= chips; chip->name; chip++, i++)
+ {
+ pr_debug("%s(), Probing for %s ...\n", __func__, chip->name);
+
+ /* Try all config registers for this chip */
+ for (cfg=0; cfg<2; cfg++)
+ {
+ cfg_base = chip->cfg[cfg];
+ if (!cfg_base)
+ continue;
+
+ memset(&info, 0, sizeof(chipio_t));
+ info.cfg_base = cfg_base;
+ info.fir_base = io[i];
+ info.dma = dma[i];
+ info.irq = irq[i];
+
+
+ /* Enter Configuration */
+ outb(chip->entr1, cfg_base);
+ outb(chip->entr2, cfg_base);
+
+ /* Select Logical Device 5 Registers (UART2) */
+ outb(0x07, cfg_base);
+ outb(0x05, cfg_base+1);
+
+ /* Read Chip Identification Register */
+ outb(chip->cid_index, cfg_base);
+ reg = inb(cfg_base+1);
+
+ if (reg == chip->cid_value)
+ {
+ pr_debug("%s(), Chip found at 0x%03x\n",
+ __func__, cfg_base);
+
+ outb(0x1F, cfg_base);
+ revision = inb(cfg_base+1);
+ pr_debug("%s(), Found %s chip, revision=%d\n",
+ __func__, chip->name, revision);
+
+ /*
+ * If the user supplies the base address, then
+ * we init the chip, if not we probe the values
+ * set by the BIOS
+ */
+ if (io[i] < 2000)
+ {
+ chip->init(chip, &info);
+ }
+ else
+ {
+ chip->probe(chip, &info);
+ }
+
+ if (ali_ircc_open(i, &info) == 0)
+ ret = 0;
+ i++;
+ }
+ else
+ {
+ pr_debug("%s(), No %s chip at 0x%03x\n",
+ __func__, chip->name, cfg_base);
+ }
+ /* Exit configuration */
+ outb(0xbb, cfg_base);
+ }
+ }
+
+ if (ret)
+ platform_driver_unregister(&ali_ircc_driver);
+
+ return ret;
+}
+
+/*
+ * Function ali_ircc_cleanup ()
+ *
+ * Close all configured chips
+ *
+ */
+static void __exit ali_ircc_cleanup(void)
+{
+ int i;
+
+ for (i=0; i < ARRAY_SIZE(dev_self); i++) {
+ if (dev_self[i])
+ ali_ircc_close(dev_self[i]);
+ }
+
+ platform_driver_unregister(&ali_ircc_driver);
+
+}
+
+static const struct net_device_ops ali_ircc_sir_ops = {
+ .ndo_open = ali_ircc_net_open,
+ .ndo_stop = ali_ircc_net_close,
+ .ndo_start_xmit = ali_ircc_sir_hard_xmit,
+ .ndo_do_ioctl = ali_ircc_net_ioctl,
+};
+
+static const struct net_device_ops ali_ircc_fir_ops = {
+ .ndo_open = ali_ircc_net_open,
+ .ndo_stop = ali_ircc_net_close,
+ .ndo_start_xmit = ali_ircc_fir_hard_xmit,
+ .ndo_do_ioctl = ali_ircc_net_ioctl,
+};
+
+/*
+ * Function ali_ircc_open (int i, chipio_t *inf)
+ *
+ * Open driver instance
+ *
+ */
+static int ali_ircc_open(int i, chipio_t *info)
+{
+ struct net_device *dev;
+ struct ali_ircc_cb *self;
+ int dongle_id;
+ int err;
+
+ if (i >= ARRAY_SIZE(dev_self)) {
+ net_err_ratelimited("%s(), maximum number of supported chips reached!\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Set FIR FIFO and DMA Threshold */
+ if ((ali_ircc_setup(info)) == -1)
+ return -1;
+
+ dev = alloc_irdadev(sizeof(*self));
+ if (dev == NULL) {
+ net_err_ratelimited("%s(), can't allocate memory for control block!\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ self = netdev_priv(dev);
+ self->netdev = dev;
+ spin_lock_init(&self->lock);
+
+ /* Need to store self somewhere */
+ dev_self[i] = self;
+ self->index = i;
+
+ /* Initialize IO */
+ self->io.cfg_base = info->cfg_base; /* In ali_ircc_probe_53 assign */
+ self->io.fir_base = info->fir_base; /* info->sir_base = info->fir_base */
+ self->io.sir_base = info->sir_base; /* ALi SIR and FIR use the same address */
+ self->io.irq = info->irq;
+ self->io.fir_ext = CHIP_IO_EXTENT;
+ self->io.dma = info->dma;
+ self->io.fifo_size = 16; /* SIR: 16, FIR: 32 Benjamin 2000/11/1 */
+
+ /* Reserve the ioports that we need */
+ if (!request_region(self->io.fir_base, self->io.fir_ext,
+ ALI_IRCC_DRIVER_NAME)) {
+ net_warn_ratelimited("%s(), can't get iobase of 0x%03x\n",
+ __func__, self->io.fir_base);
+ err = -ENODEV;
+ goto err_out1;
+ }
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+ /* The only value we must override is the baud rate */
+ self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
+ IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8); // benjamin 2000/11/8 05:27PM
+
+ self->qos.min_turn_time.bits = qos_mtt_bits;
+
+ irda_qos_bits_to_value(&self->qos);
+
+ /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
+ self->rx_buff.truesize = 14384;
+ self->tx_buff.truesize = 14384;
+
+ /* Allocate memory if needed */
+ self->rx_buff.head =
+ dma_zalloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
+ if (self->rx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto err_out2;
+ }
+
+ self->tx_buff.head =
+ dma_zalloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
+ if (self->tx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto err_out3;
+ }
+
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->tx_buff.data = self->tx_buff.head;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Reset Tx queue info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+
+ /* Override the network functions we need to use */
+ dev->netdev_ops = &ali_ircc_sir_ops;
+
+ err = register_netdev(dev);
+ if (err) {
+ net_err_ratelimited("%s(), register_netdev() failed!\n",
+ __func__);
+ goto err_out4;
+ }
+ net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
+
+ /* Check dongle id */
+ dongle_id = ali_ircc_read_dongle_id(i, info);
+ net_info_ratelimited("%s(), %s, Found dongle: %s\n",
+ __func__, ALI_IRCC_DRIVER_NAME,
+ dongle_types[dongle_id]);
+
+ self->io.dongle_id = dongle_id;
+
+
+ return 0;
+
+ err_out4:
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+ err_out3:
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+ err_out2:
+ release_region(self->io.fir_base, self->io.fir_ext);
+ err_out1:
+ dev_self[i] = NULL;
+ free_netdev(dev);
+ return err;
+}
+
+
+/*
+ * Function ali_ircc_close (self)
+ *
+ * Close driver instance
+ *
+ */
+static int __exit ali_ircc_close(struct ali_ircc_cb *self)
+{
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ iobase = self->io.fir_base;
+
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ /* Release the PORT that this driver is using */
+ pr_debug("%s(), Releasing Region %03x\n", __func__, self->io.fir_base);
+ release_region(self->io.fir_base, self->io.fir_ext);
+
+ if (self->tx_buff.head)
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+
+ if (self->rx_buff.head)
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+
+ dev_self[self->index] = NULL;
+ free_netdev(self->netdev);
+
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_init_43 (chip, info)
+ *
+ * Initialize the ALi M1543 chip.
+ */
+static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info)
+{
+ /* All controller information like I/O address, DMA channel, IRQ
+ * are set by BIOS
+ */
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_init_53 (chip, info)
+ *
+ * Initialize the ALi M1535 chip.
+ */
+static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info)
+{
+ /* All controller information like I/O address, DMA channel, IRQ
+ * are set by BIOS
+ */
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_probe_53 (chip, info)
+ *
+ * Probes for the ALi M1535D or M1535
+ */
+static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ int hi, low, reg;
+
+
+ /* Enter Configuration */
+ outb(chip->entr1, cfg_base);
+ outb(chip->entr2, cfg_base);
+
+ /* Select Logical Device 5 Registers (UART2) */
+ outb(0x07, cfg_base);
+ outb(0x05, cfg_base+1);
+
+ /* Read address control register */
+ outb(0x60, cfg_base);
+ hi = inb(cfg_base+1);
+ outb(0x61, cfg_base);
+ low = inb(cfg_base+1);
+ info->fir_base = (hi<<8) + low;
+
+ info->sir_base = info->fir_base;
+
+ pr_debug("%s(), probing fir_base=0x%03x\n", __func__, info->fir_base);
+
+ /* Read IRQ control register */
+ outb(0x70, cfg_base);
+ reg = inb(cfg_base+1);
+ info->irq = reg & 0x0f;
+ pr_debug("%s(), probing irq=%d\n", __func__, info->irq);
+
+ /* Read DMA channel */
+ outb(0x74, cfg_base);
+ reg = inb(cfg_base+1);
+ info->dma = reg & 0x07;
+
+ if(info->dma == 0x04)
+ net_warn_ratelimited("%s(), No DMA channel assigned !\n",
+ __func__);
+ else
+ pr_debug("%s(), probing dma=%d\n", __func__, info->dma);
+
+ /* Read Enabled Status */
+ outb(0x30, cfg_base);
+ reg = inb(cfg_base+1);
+ info->enabled = (reg & 0x80) && (reg & 0x01);
+ pr_debug("%s(), probing enabled=%d\n", __func__, info->enabled);
+
+ /* Read Power Status */
+ outb(0x22, cfg_base);
+ reg = inb(cfg_base+1);
+ info->suspended = (reg & 0x20);
+ pr_debug("%s(), probing suspended=%d\n", __func__, info->suspended);
+
+ /* Exit configuration */
+ outb(0xbb, cfg_base);
+
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_setup (info)
+ *
+ * Set FIR FIFO and DMA Threshold
+ * Returns non-negative on success.
+ *
+ */
+static int ali_ircc_setup(chipio_t *info)
+{
+ unsigned char tmp;
+ int version;
+ int iobase = info->fir_base;
+
+
+ /* Locking comments :
+ * Most operations here need to be protected. We are called before
+ * the device instance is created in ali_ircc_open(), therefore
+ * nobody can bother us - Jean II */
+
+ /* Switch to FIR space */
+ SIR2FIR(iobase);
+
+ /* Master Reset */
+ outb(0x40, iobase+FIR_MCR); // benjamin 2000/11/30 11:45AM
+
+ /* Read FIR ID Version Register */
+ switch_bank(iobase, BANK3);
+ version = inb(iobase+FIR_ID_VR);
+
+ /* Should be 0x00 in the M1535/M1535D */
+ if(version != 0x00)
+ {
+ net_err_ratelimited("%s, Wrong chip version %02x\n",
+ ALI_IRCC_DRIVER_NAME, version);
+ return -1;
+ }
+
+ /* Set FIR FIFO Threshold Register */
+ switch_bank(iobase, BANK1);
+ outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR);
+
+ /* Set FIR DMA Threshold Register */
+ outb(RX_DMA_Threshold, iobase+FIR_DMA_TR);
+
+ /* CRC enable */
+ switch_bank(iobase, BANK2);
+ outb(inb(iobase+FIR_IRDA_CR) | IRDA_CR_CRC, iobase+FIR_IRDA_CR);
+
+ /* NDIS driver set TX Length here BANK2 Alias 3, Alias4*/
+
+ /* Switch to Bank 0 */
+ switch_bank(iobase, BANK0);
+
+ tmp = inb(iobase+FIR_LCR_B);
+ tmp &=~0x20; // disable SIP
+ tmp |= 0x80; // these two steps make RX mode
+ tmp &= 0xbf;
+ outb(tmp, iobase+FIR_LCR_B);
+
+ /* Disable Interrupt */
+ outb(0x00, iobase+FIR_IER);
+
+
+ /* Switch to SIR space */
+ FIR2SIR(iobase);
+
+ net_info_ratelimited("%s, driver loaded (Benjamin Kong)\n",
+ ALI_IRCC_DRIVER_NAME);
+
+ /* Enable receive interrupts */
+ // outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM
+ // Turn on the interrupts in ali_ircc_net_open
+
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_read_dongle_id (int index, info)
+ *
+ * Try to read dongle identification. This procedure needs to be executed
+ * once after power-on/reset. It also needs to be used whenever you suspect
+ * that the user may have plugged/unplugged the IrDA Dongle.
+ */
+static int ali_ircc_read_dongle_id (int i, chipio_t *info)
+{
+ int dongle_id, reg;
+ int cfg_base = info->cfg_base;
+
+
+ /* Enter Configuration */
+ outb(chips[i].entr1, cfg_base);
+ outb(chips[i].entr2, cfg_base);
+
+ /* Select Logical Device 5 Registers (UART2) */
+ outb(0x07, cfg_base);
+ outb(0x05, cfg_base+1);
+
+ /* Read Dongle ID */
+ outb(0xf0, cfg_base);
+ reg = inb(cfg_base+1);
+ dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01);
+ pr_debug("%s(), probing dongle_id=%d, dongle_types=%s\n",
+ __func__, dongle_id, dongle_types[dongle_id]);
+
+ /* Exit configuration */
+ outb(0xbb, cfg_base);
+
+
+ return dongle_id;
+}
+
+/*
+ * Function ali_ircc_interrupt (irq, dev_id, regs)
+ *
+ * An interrupt from the chip has arrived. Time to do some work
+ *
+ */
+static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct ali_ircc_cb *self;
+ int ret;
+
+
+ self = netdev_priv(dev);
+
+ spin_lock(&self->lock);
+
+ /* Dispatch interrupt handler for the current speed */
+ if (self->io.speed > 115200)
+ ret = ali_ircc_fir_interrupt(self);
+ else
+ ret = ali_ircc_sir_interrupt(self);
+
+ spin_unlock(&self->lock);
+
+ return ret;
+}
+/*
+ * Function ali_ircc_fir_interrupt(irq, struct ali_ircc_cb *self)
+ *
+ * Handle MIR/FIR interrupt
+ *
+ */
+static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
+{
+ __u8 eir, OldMessageCount;
+ int iobase, tmp;
+
+
+ iobase = self->io.fir_base;
+
+ switch_bank(iobase, BANK0);
+ self->InterruptID = inb(iobase+FIR_IIR);
+ self->BusStatus = inb(iobase+FIR_BSR);
+
+ OldMessageCount = (self->LineStatus + 1) & 0x07;
+ self->LineStatus = inb(iobase+FIR_LSR);
+ //self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM
+ eir = self->InterruptID & self->ier; /* Mask out the interesting ones */
+
+ pr_debug("%s(), self->InterruptID = %x\n", __func__, self->InterruptID);
+ pr_debug("%s(), self->LineStatus = %x\n", __func__, self->LineStatus);
+ pr_debug("%s(), self->ier = %x\n", __func__, self->ier);
+ pr_debug("%s(), eir = %x\n", __func__, eir);
+
+ /* Disable interrupts */
+ SetCOMInterrupts(self, FALSE);
+
+ /* Tx or Rx Interrupt */
+
+ if (eir & IIR_EOM)
+ {
+ if (self->io.direction == IO_XMIT) /* TX */
+ {
+ pr_debug("%s(), ******* IIR_EOM (Tx) *******\n",
+ __func__);
+
+ if(ali_ircc_dma_xmit_complete(self))
+ {
+ if (irda_device_txqueue_empty(self->netdev))
+ {
+ /* Prepare for receive */
+ ali_ircc_dma_receive(self);
+ self->ier = IER_EOM;
+ }
+ }
+ else
+ {
+ self->ier = IER_EOM;
+ }
+
+ }
+ else /* RX */
+ {
+ pr_debug("%s(), ******* IIR_EOM (Rx) *******\n",
+ __func__);
+
+ if(OldMessageCount > ((self->LineStatus+1) & 0x07))
+ {
+ self->rcvFramesOverflow = TRUE;
+ pr_debug("%s(), ******* self->rcvFramesOverflow = TRUE ********\n",
+ __func__);
+ }
+
+ if (ali_ircc_dma_receive_complete(self))
+ {
+ pr_debug("%s(), ******* receive complete ********\n",
+ __func__);
+
+ self->ier = IER_EOM;
+ }
+ else
+ {
+ pr_debug("%s(), ******* Not receive complete ********\n",
+ __func__);
+
+ self->ier = IER_EOM | IER_TIMER;
+ }
+
+ }
+ }
+ /* Timer Interrupt */
+ else if (eir & IIR_TIMER)
+ {
+ if(OldMessageCount > ((self->LineStatus+1) & 0x07))
+ {
+ self->rcvFramesOverflow = TRUE;
+ pr_debug("%s(), ******* self->rcvFramesOverflow = TRUE *******\n",
+ __func__);
+ }
+ /* Disable Timer */
+ switch_bank(iobase, BANK1);
+ tmp = inb(iobase+FIR_CR);
+ outb( tmp& ~CR_TIMER_EN, iobase+FIR_CR);
+
+ /* Check if this is a Tx timer interrupt */
+ if (self->io.direction == IO_XMIT)
+ {
+ ali_ircc_dma_xmit(self);
+
+ /* Interrupt on EOM */
+ self->ier = IER_EOM;
+
+ }
+ else /* Rx */
+ {
+ if(ali_ircc_dma_receive_complete(self))
+ {
+ self->ier = IER_EOM;
+ }
+ else
+ {
+ self->ier = IER_EOM | IER_TIMER;
+ }
+ }
+ }
+
+ /* Restore Interrupt */
+ SetCOMInterrupts(self, TRUE);
+
+ return IRQ_RETVAL(eir);
+}
+
+/*
+ * Function ali_ircc_sir_interrupt (self)
+ *
+ * Handle SIR interrupt
+ *
+ */
+static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
+{
+ int iobase;
+ int iir, lsr;
+
+
+ iobase = self->io.sir_base;
+
+ iir = inb(iobase+UART_IIR) & UART_IIR_ID;
+ if (iir) {
+ /* Clear interrupt */
+ lsr = inb(iobase+UART_LSR);
+
+ pr_debug("%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
+ __func__, iir, lsr, iobase);
+
+ switch (iir)
+ {
+ case UART_IIR_RLSI:
+ pr_debug("%s(), RLSI\n", __func__);
+ break;
+ case UART_IIR_RDI:
+ /* Receive interrupt */
+ ali_ircc_sir_receive(self);
+ break;
+ case UART_IIR_THRI:
+ if (lsr & UART_LSR_THRE)
+ {
+ /* Transmitter ready for data */
+ ali_ircc_sir_write_wakeup(self);
+ }
+ break;
+ default:
+ pr_debug("%s(), unhandled IIR=%#x\n",
+ __func__, iir);
+ break;
+ }
+
+ }
+
+
+ return IRQ_RETVAL(iir);
+}
+
+
+/*
+ * Function ali_ircc_sir_receive (self)
+ *
+ * Receive one frame from the infrared port
+ *
+ */
+static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
+{
+ int boguscount = 0;
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ iobase = self->io.sir_base;
+
+ /*
+ * Receive all characters in Rx FIFO, unwrap and unstuff them.
+ * async_unwrap_char will deliver all found frames
+ */
+ do {
+ async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
+ inb(iobase+UART_RX));
+
+ /* Make sure we don't stay here too long */
+ if (boguscount++ > 32) {
+ pr_debug("%s(), breaking!\n", __func__);
+ break;
+ }
+ } while (inb(iobase+UART_LSR) & UART_LSR_DR);
+
+}
+
+/*
+ * Function ali_ircc_sir_write_wakeup (self)
+ *
+ * Called by the driver when there's room for more data. If we have
+ * more packets to send, we send them here.
+ *
+ */
+static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
+{
+ int actual = 0;
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+
+ iobase = self->io.sir_base;
+
+ /* Finished with frame? */
+ if (self->tx_buff.len > 0)
+ {
+ /* Write data left in transmit buffer */
+ actual = ali_ircc_sir_write(iobase, self->io.fifo_size,
+ self->tx_buff.data, self->tx_buff.len);
+ self->tx_buff.data += actual;
+ self->tx_buff.len -= actual;
+ }
+ else
+ {
+ if (self->new_speed)
+ {
+ /* We must wait until all data are gone */
+ while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT))
+ pr_debug("%s(), UART_LSR_THRE\n", __func__);
+
+ pr_debug("%s(), Changing speed! self->new_speed = %d\n",
+ __func__, self->new_speed);
+ ali_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+
+ // benjamin 2000/11/10 06:32PM
+ if (self->io.speed > 115200)
+ {
+ pr_debug("%s(), ali_ircc_change_speed from UART_LSR_TEMT\n",
+ __func__);
+
+ self->ier = IER_EOM;
+ // SetCOMInterrupts(self, TRUE);
+ return;
+ }
+ }
+ else
+ {
+ netif_wake_queue(self->netdev);
+ }
+
+ self->netdev->stats.tx_packets++;
+
+ /* Turn on receive interrupts */
+ outb(UART_IER_RDI, iobase+UART_IER);
+ }
+
+}
+
+static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
+{
+ struct net_device *dev = self->netdev;
+ int iobase;
+
+
+ pr_debug("%s(), setting speed = %d\n", __func__, baud);
+
+ /* This function *must* be called with irq off and spin-lock.
+ * - Jean II */
+
+ iobase = self->io.fir_base;
+
+ SetCOMInterrupts(self, FALSE); // 2000/11/24 11:43AM
+
+ /* Go to MIR, FIR Speed */
+ if (baud > 115200)
+ {
+
+
+ ali_ircc_fir_change_speed(self, baud);
+
+ /* Install FIR xmit handler */
+ dev->netdev_ops = &ali_ircc_fir_ops;
+
+ /* Enable interrupt */
+ self->ier = IER_EOM; // benjamin 2000/11/20 07:24PM
+
+ /* Be ready for incoming frames */
+ ali_ircc_dma_receive(self); // benjamin 2000/11/8 07:46PM not complete
+ }
+ /* Go to SIR Speed */
+ else
+ {
+ ali_ircc_sir_change_speed(self, baud);
+
+ /* Install SIR xmit handler */
+ dev->netdev_ops = &ali_ircc_sir_ops;
+ }
+
+
+ SetCOMInterrupts(self, TRUE); // 2000/11/24 11:43AM
+
+ netif_wake_queue(self->netdev);
+
+}
+
+static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
+{
+
+ int iobase;
+ struct ali_ircc_cb *self = priv;
+ struct net_device *dev;
+
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ dev = self->netdev;
+ iobase = self->io.fir_base;
+
+ pr_debug("%s(), self->io.speed = %d, change to speed = %d\n",
+ __func__, self->io.speed, baud);
+
+ /* Come from SIR speed */
+ if(self->io.speed <=115200)
+ {
+ SIR2FIR(iobase);
+ }
+
+ /* Update accounting for new speed */
+ self->io.speed = baud;
+
+ // Set Dongle Speed mode
+ ali_ircc_change_dongle_speed(self, baud);
+
+}
+
+/*
+ * Function ali_ircc_sir_change_speed (self, speed)
+ *
+ * Set speed of IrDA port to specified baudrate
+ *
+ */
+static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
+{
+ struct ali_ircc_cb *self = priv;
+ int iobase;
+ int fcr; /* FIFO control reg */
+ int lcr; /* Line control reg */
+ int divisor;
+
+
+ pr_debug("%s(), Setting speed to: %d\n", __func__, speed);
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ iobase = self->io.sir_base;
+
+ /* Come from MIR or FIR speed */
+ if(self->io.speed >115200)
+ {
+ // Set Dongle Speed mode first
+ ali_ircc_change_dongle_speed(self, speed);
+
+ FIR2SIR(iobase);
+ }
+
+ // Clear Line and Auxiliary status registers 2000/11/24 11:47AM
+
+ inb(iobase+UART_LSR);
+ inb(iobase+UART_SCR);
+
+ /* Update accounting for new speed */
+ self->io.speed = speed;
+
+ divisor = 115200/speed;
+
+ fcr = UART_FCR_ENABLE_FIFO;
+
+ /*
+ * Use trigger level 1 to avoid 3 ms timeout delay at 9600 bps, and
+ * almost 1.7 ms at 19200 bps. At speeds above that we can just forget
+ * about this timeout since it will always be fast enough.
+ */
+ if (self->io.speed < 38400)
+ fcr |= UART_FCR_TRIGGER_1;
+ else
+ fcr |= UART_FCR_TRIGGER_14;
+
+ /* IrDA ports use 8N1 */
+ lcr = UART_LCR_WLEN8;
+
+ outb(UART_LCR_DLAB | lcr, iobase+UART_LCR); /* Set DLAB */
+ outb(divisor & 0xff, iobase+UART_DLL); /* Set speed */
+ outb(divisor >> 8, iobase+UART_DLM);
+ outb(lcr, iobase+UART_LCR); /* Set 8N1 */
+ outb(fcr, iobase+UART_FCR); /* Enable FIFO's */
+
+ /* without this, the connection will be broken after coming back from FIR speed,
+ but with this, the SIR connection is harder to establish */
+ outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR);
+}
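
As an illustrative aside, the divisor latch value programmed into DLL/DLM above is simply 115200 divided by the requested SIR rate, exactly as the `divisor = 115200/speed` line computes. A minimal standalone sketch of the resulting values for the standard rates:

	#include <stdio.h>

	/* Standalone sketch of the divisor latch values programmed above:
	 * as in the driver, divisor = 115200 / speed, split into the low
	 * (DLL) and high (DLM) bytes. */
	int main(void)
	{
		static const int speeds[] = { 9600, 19200, 38400, 57600, 115200 };
		unsigned int i;

		for (i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++) {
			int divisor = 115200 / speeds[i];

			printf("%6d baud -> divisor %2d (DLL=0x%02x, DLM=0x%02x)\n",
			       speeds[i], divisor, divisor & 0xff, divisor >> 8);
		}
		return 0;
	}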
+
+static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
+{
+
+ struct ali_ircc_cb *self = priv;
+ int iobase,dongle_id;
+ int tmp = 0;
+
+
+ iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */
+ dongle_id = self->io.dongle_id;
+
+ /* We are already locked, no need to do it again */
+
+ pr_debug("%s(), Set Speed for %s , Speed = %d\n",
+ __func__, dongle_types[dongle_id], speed);
+
+ switch_bank(iobase, BANK2);
+ tmp = inb(iobase+FIR_IRDA_CR);
+
+ /* IBM type dongle */
+ if(dongle_id == 0)
+ {
+ if(speed == 4000000)
+ {
+ // __ __
+ // SD/MODE __| |__ __
+ // __ __
+ // IRTX __ __| |__
+ // T1 T2 T3 T4 T5
+
+ tmp &= ~IRDA_CR_HDLC; // HDLC=0
+ tmp |= IRDA_CR_CRC; // CRC=1
+
+ switch_bank(iobase, BANK2);
+ outb(tmp, iobase+FIR_IRDA_CR);
+
+ // T1 -> SD/MODE:0 IRTX:0
+ tmp &= ~0x09;
+ tmp |= 0x02;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // T2 -> SD/MODE:1 IRTX:0
+ tmp &= ~0x01;
+ tmp |= 0x0a;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // T3 -> SD/MODE:1 IRTX:1
+ tmp |= 0x0b;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // T4 -> SD/MODE:0 IRTX:1
+ tmp &= ~0x08;
+ tmp |= 0x03;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // T5 -> SD/MODE:0 IRTX:0
+ tmp &= ~0x09;
+ tmp |= 0x02;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // reset -> Normal TX output Signal
+ outb(tmp & ~0x02, iobase+FIR_IRDA_CR);
+ }
+ else /* speed <=1152000 */
+ {
+ // __
+ // SD/MODE __| |__
+ //
+ // IRTX ________
+ // T1 T2 T3
+
+ /* MIR 1.152 Mbps, 0.576 Mbps */
+ if (speed==1152000)
+ {
+ tmp |= 0xA0; //HDLC=1, 1.152Mbps=1
+ }
+ else
+ {
+ tmp &=~0x80; //HDLC 0.576Mbps
+ tmp |= 0x20; //HDLC=1,
+ }
+
+ tmp |= IRDA_CR_CRC; // CRC=1
+
+ switch_bank(iobase, BANK2);
+ outb(tmp, iobase+FIR_IRDA_CR);
+
+ /* MIR 1.152 Mbps, 0.576 Mbps */
+
+ //switch_bank(iobase, BANK2);
+ // T1 -> SD/MODE:0 IRTX:0
+ tmp &= ~0x09;
+ tmp |= 0x02;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // T2 -> SD/MODE:1 IRTX:0
+ tmp &= ~0x01;
+ tmp |= 0x0a;
+ outb(tmp, iobase+FIR_IRDA_CR);
+
+ // T3 -> SD/MODE:0 IRTX:0
+ tmp &= ~0x09;
+ tmp |= 0x02;
+ outb(tmp, iobase+FIR_IRDA_CR);
+ udelay(2);
+
+ // reset -> Normal TX output Signal
+ outb(tmp & ~0x02, iobase+FIR_IRDA_CR);
+ }
+ }
+ else if (dongle_id == 1) /* HP HDSL-3600 */
+ {
+ switch(speed)
+ {
+ case 4000000:
+ tmp &= ~IRDA_CR_HDLC; // HDLC=0
+ break;
+
+ case 1152000:
+ tmp |= 0xA0; // HDLC=1, 1.152Mbps=1
+ break;
+
+ case 576000:
+ tmp &=~0x80; // HDLC 0.576Mbps
+ tmp |= 0x20; // HDLC=1,
+ break;
+ }
+
+ tmp |= IRDA_CR_CRC; // CRC=1
+
+ switch_bank(iobase, BANK2);
+ outb(tmp, iobase+FIR_IRDA_CR);
+ }
+ else /* HP HDSL-1100 */
+ {
+ if(speed <= 115200) /* SIR */
+ {
+
+ tmp &= ~IRDA_CR_FIR_SIN; // HP sin select = 0
+
+ switch_bank(iobase, BANK2);
+ outb(tmp, iobase+FIR_IRDA_CR);
+ }
+ else /* MIR FIR */
+ {
+
+ switch(speed)
+ {
+ case 4000000:
+ tmp &= ~IRDA_CR_HDLC; // HDLC=0
+ break;
+
+ case 1152000:
+ tmp |= 0xA0; // HDLC=1, 1.152Mbps=1
+ break;
+
+ case 576000:
+ tmp &=~0x80; // HDLC 0.576Mbps
+ tmp |= 0x20; // HDLC=1,
+ break;
+ }
+
+ tmp |= IRDA_CR_CRC; // CRC=1
+ tmp |= IRDA_CR_FIR_SIN; // HP sin select = 1
+
+ switch_bank(iobase, BANK2);
+ outb(tmp, iobase+FIR_IRDA_CR);
+ }
+ }
+
+ switch_bank(iobase, BANK0);
+
+}
+
+/*
+ * Function ali_ircc_sir_write (driver)
+ *
+ * Fill Tx FIFO with transmit data
+ *
+ */
+static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
+{
+ int actual = 0;
+
+
+ /* Tx FIFO should be empty! */
+ if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
+ pr_debug("%s(), failed, fifo not empty!\n", __func__);
+ return 0;
+ }
+
+ /* Fill FIFO with current frame */
+ while ((fifo_size-- > 0) && (actual < len)) {
+ /* Transmit next byte */
+ outb(buf[actual], iobase+UART_TX);
+
+ actual++;
+ }
+
+ return actual;
+}
+
+/*
+ * Function ali_ircc_net_open (dev)
+ *
+ * Start the device
+ *
+ */
+static int ali_ircc_net_open(struct net_device *dev)
+{
+ struct ali_ircc_cb *self;
+ int iobase;
+ char hwname[32];
+
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ iobase = self->io.fir_base;
+
+ /* Request IRQ and install Interrupt Handler */
+ if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev))
+ {
+ net_warn_ratelimited("%s, unable to allocate irq=%d\n",
+ ALI_IRCC_DRIVER_NAME, self->io.irq);
+ return -EAGAIN;
+ }
+
+ /*
+ * Always allocate the DMA channel after the IRQ, and clean up on
+ * failure.
+ */
+ if (request_dma(self->io.dma, dev->name)) {
+ net_warn_ratelimited("%s, unable to allocate dma=%d\n",
+ ALI_IRCC_DRIVER_NAME, self->io.dma);
+ free_irq(self->io.irq, dev);
+ return -EAGAIN;
+ }
+
+ /* Turn on interrupts */
+ outb(UART_IER_RDI , iobase+UART_IER);
+
+ /* Ready to play! */
+ netif_start_queue(dev); //benjamin by irport
+
+ /* Give self a hardware name */
+ sprintf(hwname, "ALI-FIR @ 0x%03x", self->io.fir_base);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open(dev, &self->qos, hwname);
+
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_net_close (dev)
+ *
+ * Stop the device
+ *
+ */
+static int ali_ircc_net_close(struct net_device *dev)
+{
+
+ struct ali_ircc_cb *self;
+ //int iobase;
+
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+
+ disable_dma(self->io.dma);
+
+ /* Disable interrupts */
+ SetCOMInterrupts(self, FALSE);
+
+ free_irq(self->io.irq, dev);
+ free_dma(self->io.dma);
+
+
+ return 0;
+}
+
+/*
+ * Function ali_ircc_fir_hard_xmit (skb, dev)
+ *
+ * Transmit the frame
+ *
+ */
+static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ali_ircc_cb *self;
+ unsigned long flags;
+ int iobase;
+ __u32 speed;
+ int mtt, diff;
+
+
+ self = netdev_priv(dev);
+ iobase = self->io.fir_base;
+
+ netif_stop_queue(dev);
+
+ /* Make sure tests & speed change are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Note : you should make sure that speed changes are not going
+ * to corrupt any outgoing frame. Look at nsc-ircc for the gory
+ * details - Jean II */
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ ali_ircc_change_speed(self, speed);
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ } else
+ self->new_speed = speed;
+ }
+
+ /* Register and copy this frame to DMA memory */
+ self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
+ self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
+ self->tx_fifo.tail += skb->len;
+
+ dev->stats.tx_bytes += skb->len;
+
+ skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
+ skb->len);
+ self->tx_fifo.len++;
+ self->tx_fifo.free++;
+
+ /* Start transmit only if there is currently no transmit going on */
+ if (self->tx_fifo.len == 1)
+ {
+ /* Check if we must wait the min turn time or not */
+ mtt = irda_get_mtt(skb);
+
+ if (mtt)
+ {
+ /* Check how much time we have used already */
+ diff = ktime_us_delta(ktime_get(), self->stamp);
+ /* self->stamp is set from ali_ircc_dma_receive_complete() */
+
+ pr_debug("%s(), ******* diff = %d *******\n",
+ __func__, diff);
+
+ /* Check if the mtt is larger than the time we have
+ * already used by all the protocol processing
+ */
+ if (mtt > diff)
+ {
+ mtt -= diff;
+
+ /*
+ * Use the hardware timer if the delay is larger than
+ * 500 us, and udelay for smaller values, which should
+ * be acceptable
+ */
+ if (mtt > 500)
+ {
+ /* Adjust for timer resolution */
+ mtt = (mtt+250) / 500; /* round to the nearest 500 us tick */
+
+ pr_debug("%s(), ************** mtt = %d ***********\n",
+ __func__, mtt);
+
+ /* Setup timer */
+ if (mtt == 1) /* 500 us */
+ {
+ switch_bank(iobase, BANK1);
+ outb(TIMER_IIR_500, iobase+FIR_TIMER_IIR);
+ }
+ else if (mtt == 2) /* 1 ms */
+ {
+ switch_bank(iobase, BANK1);
+ outb(TIMER_IIR_1ms, iobase+FIR_TIMER_IIR);
+ }
+ else /* more than 1 ms -> use 2 ms timer */
+ {
+ switch_bank(iobase, BANK1);
+ outb(TIMER_IIR_2ms, iobase+FIR_TIMER_IIR);
+ }
+
+
+ /* Start timer */
+ outb(inb(iobase+FIR_CR) | CR_TIMER_EN, iobase+FIR_CR);
+ self->io.direction = IO_XMIT;
+
+ /* Enable timer interrupt */
+ self->ier = IER_TIMER;
+ SetCOMInterrupts(self, TRUE);
+
+ /* Timer will take care of the rest */
+ goto out;
+ }
+ else
+ udelay(mtt);
+ } // if (if (mtt > diff)
+ }// if (mtt)
+
+ /* Enable EOM interrupt */
+ self->ier = IER_EOM;
+ SetCOMInterrupts(self, TRUE);
+
+ /* Transmit frame */
+ ali_ircc_dma_xmit(self);
+ } // if (self->tx_fifo.len == 1)
+
+ out:
+
+ /* Not busy transmitting anymore if window is not full */
+ if (self->tx_fifo.free < MAX_TX_WINDOW)
+ netif_wake_queue(self->netdev);
+
+ /* Restore bank register */
+ switch_bank(iobase, BANK0);
+
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
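
As an illustrative aside, the minimum-turn-time path above rounds the remaining delay to the hardware timer's 500 us resolution and then picks the 500 us, 1 ms, or 2 ms timer setting. A minimal standalone sketch of that rounding arithmetic (the sample delays are made up):

	#include <stdio.h>

	/* Standalone sketch of the min-turn-time handling above: the remaining
	 * delay in microseconds is rounded to the nearest 500 us timer tick,
	 * then mapped onto the 500 us / 1 ms / 2 ms hardware timer settings. */
	static int mtt_to_ticks(int mtt_us)
	{
		return (mtt_us + 250) / 500;
	}

	static const char *tick_to_timer(int ticks)
	{
		if (ticks == 1)
			return "500 us timer";
		if (ticks == 2)
			return "1 ms timer";
		return "2 ms timer";
	}

	int main(void)
	{
		int samples[] = { 600, 1000, 2400, 10000 };	/* example delays, in us */
		unsigned int i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
			int ticks = mtt_to_ticks(samples[i]);

			printf("mtt %5d us -> %2d tick(s) -> %s\n",
			       samples[i], ticks, tick_to_timer(ticks));
		}
		return 0;
	}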
+
+
+static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
+{
+ int iobase, tmp;
+ unsigned char FIFO_OPTI, Hi, Lo;
+
+
+
+ iobase = self->io.fir_base;
+
+ /* FIFO threshold; this method comes from the NDIS5 code */
+
+ if(self->tx_fifo.queue[self->tx_fifo.ptr].len < TX_FIFO_Threshold)
+ FIFO_OPTI = self->tx_fifo.queue[self->tx_fifo.ptr].len-1;
+ else
+ FIFO_OPTI = TX_FIFO_Threshold;
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK1);
+ outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);
+
+ self->io.direction = IO_XMIT;
+
+ irda_setup_dma(self->io.dma,
+ ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
+ self->tx_buff.head) + self->tx_buff_dma,
+ self->tx_fifo.queue[self->tx_fifo.ptr].len,
+ DMA_TX_MODE);
+
+ /* Reset Tx FIFO */
+ switch_bank(iobase, BANK0);
+ outb(LCR_A_FIFO_RESET, iobase+FIR_LCR_A);
+
+ /* Set Tx FIFO threshold */
+ if (self->fifo_opti_buf!=FIFO_OPTI)
+ {
+ switch_bank(iobase, BANK1);
+ outb(FIFO_OPTI, iobase+FIR_FIFO_TR) ;
+ self->fifo_opti_buf=FIFO_OPTI;
+ }
+
+ /* Set Tx DMA threshold */
+ switch_bank(iobase, BANK1);
+ outb(TX_DMA_Threshold, iobase+FIR_DMA_TR);
+
+ /* Set max Tx frame size */
+ Hi = (self->tx_fifo.queue[self->tx_fifo.ptr].len >> 8) & 0x0f;
+ Lo = self->tx_fifo.queue[self->tx_fifo.ptr].len & 0xff;
+ switch_bank(iobase, BANK2);
+ outb(Hi, iobase+FIR_TX_DSR_HI);
+ outb(Lo, iobase+FIR_TX_DSR_LO);
+
+ /* Disable SIP, disable Brick Wall (not supported in TX mode), change to TX mode */
+ switch_bank(iobase, BANK0);
+ tmp = inb(iobase+FIR_LCR_B);
+ tmp &= ~0x20; // Disable SIP
+ outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
+ pr_debug("%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n",
+ __func__, inb(iobase + FIR_LCR_B));
+
+ outb(0, iobase+FIR_LSR);
+
+ /* Enable DMA and Burst Mode */
+ switch_bank(iobase, BANK1);
+ outb(inb(iobase+FIR_CR) | CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);
+
+ switch_bank(iobase, BANK0);
+
+}
+
+static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
+{
+ int iobase;
+ int ret = TRUE;
+
+
+ iobase = self->io.fir_base;
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK1);
+ outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);
+
+ /* Check for underrun! */
+ switch_bank(iobase, BANK0);
+ if((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT)
+
+ {
+ net_err_ratelimited("%s(), ********* LSR_FRAME_ABORT *********\n",
+ __func__);
+ self->netdev->stats.tx_errors++;
+ self->netdev->stats.tx_fifo_errors++;
+ }
+ else
+ {
+ self->netdev->stats.tx_packets++;
+ }
+
+ /* Check if we need to change the speed */
+ if (self->new_speed)
+ {
+ ali_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ }
+
+ /* Finished with this frame, so prepare for next */
+ self->tx_fifo.ptr++;
+ self->tx_fifo.len--;
+
+ /* Any frames to be sent back-to-back? */
+ if (self->tx_fifo.len)
+ {
+ ali_ircc_dma_xmit(self);
+
+ /* Not finished yet! */
+ ret = FALSE;
+ }
+ else
+ { /* Reset Tx FIFO info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+ }
+
+ /* Make sure we have room for more frames */
+ if (self->tx_fifo.free < MAX_TX_WINDOW) {
+ /* Not busy transmitting anymore */
+ /* Tell the network layer, that we can accept more frames */
+ netif_wake_queue(self->netdev);
+ }
+
+ switch_bank(iobase, BANK0);
+
+ return ret;
+}
+
+/*
+ * Function ali_ircc_dma_receive (self)
+ *
+ * Get ready for receiving a frame. The device will initiate a DMA
+ * if it starts to receive a frame.
+ *
+ */
+static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
+{
+ int iobase, tmp;
+
+
+ iobase = self->io.fir_base;
+
+ /* Reset Tx FIFO info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK1);
+ outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);
+
+ /* Reset Message Count */
+ switch_bank(iobase, BANK0);
+ outb(0x07, iobase+FIR_LSR);
+
+ self->rcvFramesOverflow = FALSE;
+
+ self->LineStatus = inb(iobase+FIR_LSR) ;
+
+ /* Reset Rx FIFO info */
+ self->io.direction = IO_RECV;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Reset Rx FIFO */
+ // switch_bank(iobase, BANK0);
+ outb(LCR_A_FIFO_RESET, iobase+FIR_LCR_A);
+
+ self->st_fifo.len = self->st_fifo.pending_bytes = 0;
+ self->st_fifo.tail = self->st_fifo.head = 0;
+
+ irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
+ DMA_RX_MODE);
+
+ /* Set Receive Mode,Brick Wall */
+ //switch_bank(iobase, BANK0);
+ tmp = inb(iobase+FIR_LCR_B);
+ outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM
+ pr_debug("%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n",
+ __func__, inb(iobase + FIR_LCR_B));
+
+ /* Set Rx Threshold */
+ switch_bank(iobase, BANK1);
+ outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR);
+ outb(RX_DMA_Threshold, iobase+FIR_DMA_TR);
+
+ /* Enable DMA and Burst Mode */
+ // switch_bank(iobase, BANK1);
+ outb(CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);
+
+ switch_bank(iobase, BANK0);
+ return 0;
+}
+
+static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
+{
+ struct st_fifo *st_fifo;
+ struct sk_buff *skb;
+ __u8 status, MessageCount;
+ int len, i, iobase, val;
+
+ st_fifo = &self->st_fifo;
+ iobase = self->io.fir_base;
+
+ switch_bank(iobase, BANK0);
+ MessageCount = inb(iobase+ FIR_LSR)&0x07;
+
+ if (MessageCount > 0)
+ pr_debug("%s(), Message count = %d\n", __func__, MessageCount);
+
+ for (i=0; i<=MessageCount; i++)
+ {
+ /* Bank 0 */
+ switch_bank(iobase, BANK0);
+ status = inb(iobase+FIR_LSR);
+
+ switch_bank(iobase, BANK2);
+ len = inb(iobase+FIR_RX_DSR_HI) & 0x0f;
+ len = len << 8;
+ len |= inb(iobase+FIR_RX_DSR_LO);
+
+ pr_debug("%s(), RX Length = 0x%.2x,\n", __func__ , len);
+ pr_debug("%s(), RX Status = 0x%.2x,\n", __func__ , status);
+
+ if (st_fifo->tail >= MAX_RX_WINDOW) {
+ pr_debug("%s(), window is full!\n", __func__);
+ continue;
+ }
+
+ st_fifo->entries[st_fifo->tail].status = status;
+ st_fifo->entries[st_fifo->tail].len = len;
+ st_fifo->pending_bytes += len;
+ st_fifo->tail++;
+ st_fifo->len++;
+ }
+
+ for (i=0; i<=MessageCount; i++)
+ {
+ /* Get first entry */
+ status = st_fifo->entries[st_fifo->head].status;
+ len = st_fifo->entries[st_fifo->head].len;
+ st_fifo->pending_bytes -= len;
+ st_fifo->head++;
+ st_fifo->len--;
+
+ /* Check for errors */
+ if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
+ {
+ pr_debug("%s(), ************* RX Errors ************\n",
+ __func__);
+
+ /* Skip frame */
+ self->netdev->stats.rx_errors++;
+
+ self->rx_buff.data += len;
+
+ if (status & LSR_FIFO_UR)
+ {
+ self->netdev->stats.rx_frame_errors++;
+ pr_debug("%s(), ************* FIFO Errors ************\n",
+ __func__);
+ }
+ if (status & LSR_FRAME_ERROR)
+ {
+ self->netdev->stats.rx_frame_errors++;
+ pr_debug("%s(), ************* FRAME Errors ************\n",
+ __func__);
+ }
+
+ if (status & LSR_CRC_ERROR)
+ {
+ self->netdev->stats.rx_crc_errors++;
+ pr_debug("%s(), ************* CRC Errors ************\n",
+ __func__);
+ }
+
+ if(self->rcvFramesOverflow)
+ {
+ self->netdev->stats.rx_frame_errors++;
+ pr_debug("%s(), ************* Overran DMA buffer ************\n",
+ __func__);
+ }
+ if(len == 0)
+ {
+ self->netdev->stats.rx_frame_errors++;
+ pr_debug("%s(), ********** Receive Frame Size = 0 *********\n",
+ __func__);
+ }
+ }
+ else
+ {
+
+ if (st_fifo->pending_bytes < 32)
+ {
+ switch_bank(iobase, BANK0);
+ val = inb(iobase+FIR_BSR);
+ if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
+ {
+ pr_debug("%s(), ************* BSR_FIFO_NOT_EMPTY ************\n",
+ __func__);
+
+ /* Put this entry back in fifo */
+ st_fifo->head--;
+ st_fifo->len++;
+ st_fifo->pending_bytes += len;
+ st_fifo->entries[st_fifo->head].status = status;
+ st_fifo->entries[st_fifo->head].len = len;
+
+ /*
+ * DMA not finished yet, so try again
+ * later, set timer value, resolution
+ * 500 us
+ */
+
+ switch_bank(iobase, BANK1);
+ outb(TIMER_IIR_500, iobase+FIR_TIMER_IIR); // 2001/1/2 05:07PM
+
+ /* Enable Timer */
+ outb(inb(iobase+FIR_CR) | CR_TIMER_EN, iobase+FIR_CR);
+
+ return FALSE; /* I'll be back! */
+ }
+ }
+
+ /*
+ * Remember the time we received this frame, so we can
+ * reduce the min turn time a bit since we will know
+ * how much time we have used for protocol processing
+ */
+ self->stamp = ktime_get();
+
+ skb = dev_alloc_skb(len+1);
+ if (skb == NULL)
+ {
+ self->netdev->stats.rx_dropped++;
+
+ return FALSE;
+ }
+
+ /* Make sure IP header gets aligned */
+ skb_reserve(skb, 1);
+
+ /* Copy frame without CRC; the CRC is removed by hardware */
+ skb_put(skb, len);
+ skb_copy_to_linear_data(skb, self->rx_buff.data, len);
+
+ /* Move to next frame */
+ self->rx_buff.data += len;
+ self->netdev->stats.rx_bytes += len;
+ self->netdev->stats.rx_packets++;
+
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ }
+ }
+
+ switch_bank(iobase, BANK0);
+
+ return TRUE;
+}
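
As an illustrative aside, the two loops above use the status FIFO as a simple queue: the first pass records (status, len) pairs at the tail, the second pass consumes them from the head while tracking pending_bytes. A toy version of that bookkeeping follows (only the field names mirror the driver; the sizes and lengths are made up):

	#include <stdio.h>

	/* Toy version of the status FIFO bookkeeping above: push entries at
	 * the tail, pop them from the head while tracking pending_bytes. */
	#define TOY_RX_WINDOW 7

	struct toy_entry {
		int status;
		int len;
	};

	struct toy_st_fifo {
		struct toy_entry entries[TOY_RX_WINDOW];
		int pending_bytes, head, tail, len;
	};

	int main(void)
	{
		static struct toy_st_fifo f;	/* zero-initialized */
		int i;

		for (i = 0; i < 3; i++) {	/* push three received frames */
			f.entries[f.tail].status = 0;
			f.entries[f.tail].len = 64 * (i + 1);
			f.pending_bytes += f.entries[f.tail].len;
			f.tail++;
			f.len++;
		}

		while (f.len) {			/* pop them in arrival order */
			struct toy_entry e = f.entries[f.head++];

			f.pending_bytes -= e.len;
			f.len--;
			printf("frame of %3d bytes, %3d bytes still pending\n",
			       e.len, f.pending_bytes);
		}
		return 0;
	}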
+
+
+
+/*
+ * Function ali_ircc_sir_hard_xmit (skb, dev)
+ *
+ * Transmit the frame!
+ *
+ */
+static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ali_ircc_cb *self;
+ unsigned long flags;
+ int iobase;
+ __u32 speed;
+
+
+ IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
+
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
+
+ iobase = self->io.sir_base;
+
+ netif_stop_queue(dev);
+
+ /* Make sure tests & speed change are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Note : you should make sure that speed changes are not going
+ * to corrupt any outgoing frame. Look at nsc-ircc for the gory
+ * details - Jean II */
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ ali_ircc_change_speed(self, speed);
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ } else
+ self->new_speed = speed;
+ }
+
+ /* Init tx buffer */
+ self->tx_buff.data = self->tx_buff.head;
+
+ /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ self->netdev->stats.tx_bytes += self->tx_buff.len;
+
+ /* Turn on transmit finished interrupt. Will fire immediately! */
+ outb(UART_IER_THRI, iobase+UART_IER);
+
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ dev_kfree_skb(skb);
+
+
+ return NETDEV_TX_OK;
+}
+
+
+/*
+ * Function ali_ircc_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct ali_ircc_cb *self;
+ unsigned long flags;
+ int ret = 0;
+
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ pr_debug("%s(), SIOCSBANDWIDTH\n", __func__);
+ /*
+ * This function will also be used by IrLAP to change the
+ * speed, so we still must allow for speed change within
+ * interrupt context.
+ */
+ if (!in_interrupt() && !capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ spin_lock_irqsave(&self->lock, flags);
+ ali_ircc_change_speed(self, irq->ifr_baudrate);
+ spin_unlock_irqrestore(&self->lock, flags);
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ pr_debug("%s(), SIOCSMEDIABUSY\n", __func__);
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ pr_debug("%s(), SIOCGRECEIVING\n", __func__);
+ /* This is protected */
+ irq->ifr_receiving = ali_ircc_is_receiving(self);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+
+ return ret;
+}
+
+/*
+ * Function ali_ircc_is_receiving (self)
+ *
+ * Return TRUE if we are currently receiving a frame
+ *
+ */
+static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
+{
+ unsigned long flags;
+ int status = FALSE;
+ int iobase;
+
+
+ IRDA_ASSERT(self != NULL, return FALSE;);
+
+ spin_lock_irqsave(&self->lock, flags);
+
+ if (self->io.speed > 115200)
+ {
+ iobase = self->io.fir_base;
+
+ switch_bank(iobase, BANK1);
+ if((inb(iobase+FIR_FIFO_FR) & 0x3f) != 0)
+ {
+ /* We are receiving something */
+ pr_debug("%s(), We are receiving something\n",
+ __func__);
+ status = TRUE;
+ }
+ switch_bank(iobase, BANK0);
+ }
+ else
+ {
+ status = (self->rx_buff.state != OUTSIDE_FRAME);
+ }
+
+ spin_unlock_irqrestore(&self->lock, flags);
+
+
+ return status;
+}
+
+static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct ali_ircc_cb *self = platform_get_drvdata(dev);
+
+ net_info_ratelimited("%s, Suspending\n", ALI_IRCC_DRIVER_NAME);
+
+ if (self->io.suspended)
+ return 0;
+
+ ali_ircc_net_close(self->netdev);
+
+ self->io.suspended = 1;
+
+ return 0;
+}
+
+static int ali_ircc_resume(struct platform_device *dev)
+{
+ struct ali_ircc_cb *self = platform_get_drvdata(dev);
+
+ if (!self->io.suspended)
+ return 0;
+
+ ali_ircc_net_open(self->netdev);
+
+ net_info_ratelimited("%s, Waking up\n", ALI_IRCC_DRIVER_NAME);
+
+ self->io.suspended = 0;
+
+ return 0;
+}
+
+/* ALi Chip Function */
+
+static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
+{
+
+ unsigned char newMask;
+
+ int iobase = self->io.fir_base; /* or sir_base */
+
+ pr_debug("%s(), -------- Start -------- ( Enable = %d )\n",
+ __func__, enable);
+
+ /* Enable the interrupt which we wish to */
+ if (enable){
+ if (self->io.direction == IO_XMIT)
+ {
+ if (self->io.speed > 115200) /* FIR, MIR */
+ {
+ newMask = self->ier;
+ }
+ else /* SIR */
+ {
+ newMask = UART_IER_THRI | UART_IER_RDI;
+ }
+ }
+ else {
+ if (self->io.speed > 115200) /* FIR, MIR */
+ {
+ newMask = self->ier;
+ }
+ else /* SIR */
+ {
+ newMask = UART_IER_RDI;
+ }
+ }
+ }
+ else /* Disable all the interrupts */
+ {
+ newMask = 0x00;
+
+ }
+
+ // SIR and FIR have different registers
+ if (self->io.speed > 115200)
+ {
+ switch_bank(iobase, BANK0);
+ outb(newMask, iobase+FIR_IER);
+ }
+ else
+ outb(newMask, iobase+UART_IER);
+
+}
+
+static void SIR2FIR(int iobase)
+{
+ //unsigned char tmp;
+
+
+ /* Already protected (change_speed() or setup()), no need to lock.
+ * Jean II */
+
+ outb(0x28, iobase+UART_MCR);
+ outb(0x68, iobase+UART_MCR);
+ outb(0x88, iobase+UART_MCR);
+
+ outb(0x60, iobase+FIR_MCR); /* Master Reset */
+ outb(0x20, iobase+FIR_MCR); /* Master Interrupt Enable */
+
+ //tmp = inb(iobase+FIR_LCR_B); /* SIP enable */
+ //tmp |= 0x20;
+ //outb(tmp, iobase+FIR_LCR_B);
+
+}
+
+static void FIR2SIR(int iobase)
+{
+ unsigned char val;
+
+
+ /* Already protected (change_speed() or setup()), no need to lock.
+ * Jean II */
+
+ outb(0x20, iobase+FIR_MCR); /* IRQ to low */
+ outb(0x00, iobase+UART_IER);
+
+ outb(0xA0, iobase+FIR_MCR); /* Don't set master reset */
+ outb(0x00, iobase+UART_FCR);
+ outb(0x07, iobase+UART_FCR);
+
+ val = inb(iobase+UART_RX);
+ val = inb(iobase+UART_LSR);
+ val = inb(iobase+UART_MSR);
+
+}
+
+MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>");
+MODULE_DESCRIPTION("ALi FIR Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" ALI_IRCC_DRIVER_NAME);
+
+
+module_param_hw_array(io, int, ioport, NULL, 0);
+MODULE_PARM_DESC(io, "Base I/O addresses");
+module_param_hw_array(irq, int, irq, NULL, 0);
+MODULE_PARM_DESC(irq, "IRQ lines");
+module_param_hw_array(dma, int, dma, NULL, 0);
+MODULE_PARM_DESC(dma, "DMA channels");
+
+module_init(ali_ircc_init);
+module_exit(ali_ircc_cleanup);
diff --git a/drivers/staging/irda/drivers/ali-ircc.h b/drivers/staging/irda/drivers/ali-ircc.h
new file mode 100644
index 000000000000..c2d9747a5108
--- /dev/null
+++ b/drivers/staging/irda/drivers/ali-ircc.h
@@ -0,0 +1,227 @@
+/*********************************************************************
+ *
+ * Filename: ali-ircc.h
+ * Version: 0.5
+ * Description: Driver for the ALI M1535D and M1543C FIR Controller
+ * Status: Experimental.
+ * Author: Benjamin Kong <benjamin_kong@ali.com.tw>
+ * Created at: 2000/10/16 03:46PM
+ * Modified at: 2001/1/3 02:56PM
+ * Modified by: Benjamin Kong <benjamin_kong@ali.com.tw>
+ *
+ * Copyright (c) 2000 Benjamin Kong <benjamin_kong@ali.com.tw>
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#ifndef ALI_IRCC_H
+#define ALI_IRCC_H
+
+#include <linux/ktime.h>
+
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+/* SIR Register */
+/* Uses the definitions from linux/serial_reg.h */
+
+/* FIR Register */
+#define BANK0 0x20
+#define BANK1 0x21
+#define BANK2 0x22
+#define BANK3 0x23
+
+#define FIR_MCR 0x07 /* Master Control Register */
+
+/* Bank 0 */
+#define FIR_DR 0x00 /* Alias 0, FIR Data Register (R/W) */
+#define FIR_IER 0x01 /* Alias 1, FIR Interrupt Enable Register (R/W) */
+#define FIR_IIR 0x02 /* Alias 2, FIR Interrupt Identification Register (Read only) */
+#define FIR_LCR_A 0x03 /* Alias 3, FIR Line Control Register A (R/W) */
+#define FIR_LCR_B 0x04 /* Alias 4, FIR Line Control Register B (R/W) */
+#define FIR_LSR 0x05 /* Alias 5, FIR Line Status Register (R/W) */
+#define FIR_BSR 0x06 /* Alias 6, FIR Bus Status Register (Read only) */
+
+
+ /* Alias 1 */
+ #define IER_FIFO 0x10 /* FIR FIFO Interrupt Enable */
+ #define IER_TIMER 0x20 /* Timer Interrupt Enable */
+ #define IER_EOM 0x40 /* End of Message Interrupt Enable */
+ #define IER_ACT 0x80 /* Active Frame Interrupt Enable */
+
+ /* Alias 2 */
+ #define IIR_FIFO 0x10 /* FIR FIFO Interrupt */
+ #define IIR_TIMER 0x20 /* Timer Interrupt */
+ #define IIR_EOM 0x40 /* End of Message Interrupt */
+ #define IIR_ACT 0x80 /* Active Frame Interrupt */
+
+ /* Alias 3 */
+ #define LCR_A_FIFO_RESET 0x80 /* FIFO Reset */
+
+ /* Alias 4 */
+ #define LCR_B_BW 0x10 /* Brick Wall */
+ #define LCR_B_SIP 0x20 /* SIP Enable */
+ #define LCR_B_TX_MODE 0x40 /* Transmit Mode */
+ #define LCR_B_RX_MODE 0x80 /* Receive Mode */
+
+ /* Alias 5 */
+ #define LSR_FIR_LSA 0x00 /* FIR Line Status Address */
+ #define LSR_FRAME_ABORT 0x08 /* Frame Abort */
+ #define LSR_CRC_ERROR 0x10 /* CRC Error */
+ #define LSR_SIZE_ERROR 0x20 /* Size Error */
+ #define LSR_FRAME_ERROR 0x40 /* Frame Error */
+ #define LSR_FIFO_UR 0x80 /* FIFO Underrun */
+ #define LSR_FIFO_OR 0x80 /* FIFO Overrun */
+
+ /* Alias 6 */
+ #define BSR_FIFO_NOT_EMPTY 0x80 /* FIFO Not Empty */
+
+/* Bank 1 */
+#define FIR_CR 0x00 /* Alias 0, FIR Configuration Register (R/W) */
+#define FIR_FIFO_TR 0x01 /* Alias 1, FIR FIFO Threshold Register (R/W) */
+#define FIR_DMA_TR 0x02 /* Alias 2, FIR DMA Threshold Register (R/W) */
+#define FIR_TIMER_IIR 0x03 /* Alias 3, FIR Timer interrupt interval register (W/O) */
+#define FIR_FIFO_FR 0x03 /* Alias 3, FIR FIFO Flag register (R/O) */
+#define FIR_FIFO_RAR 0x04 /* Alias 4, FIR FIFO Read Address register (R/O) */
+#define FIR_FIFO_WAR 0x05 /* Alias 5, FIR FIFO Write Address register (R/O) */
+#define FIR_TR 0x06 /* Alias 6, Test Register (W/O) */
+
+ /* Alias 0 */
+ #define CR_DMA_EN 0x01 /* DMA Enable */
+ #define CR_DMA_BURST 0x02 /* DMA Burst Mode */
+ #define CR_TIMER_EN 0x08 /* Timer Enable */
+
+ /* Alias 3 */
+ #define TIMER_IIR_500 0x00 /* 500 us */
+ #define TIMER_IIR_1ms 0x01 /* 1 ms */
+ #define TIMER_IIR_2ms 0x02 /* 2 ms */
+ #define TIMER_IIR_4ms 0x03 /* 4 ms */
+
+/* Bank 2 */
+#define FIR_IRDA_CR 0x00 /* Alias 0, IrDA Control Register (R/W) */
+#define FIR_BOF_CR 0x01 /* Alias 1, BOF Count Register (R/W) */
+#define FIR_BW_CR 0x02 /* Alias 2, Brick Wall Count Register (R/W) */
+#define FIR_TX_DSR_HI 0x03 /* Alias 3, TX Data Size Register (high) (R/W) */
+#define FIR_TX_DSR_LO 0x04 /* Alias 4, TX Data Size Register (low) (R/W) */
+#define FIR_RX_DSR_HI 0x05 /* Alias 5, RX Data Size Register (high) (R/W) */
+#define FIR_RX_DSR_LO 0x06 /* Alias 6, RX Data Size Register (low) (R/W) */
+
+ /* Alias 0 */
+ #define IRDA_CR_HDLC1152 0x80 /* 1.152Mbps HDLC Select */
+ #define IRDA_CR_CRC 0x40 /* CRC Select. */
+ #define IRDA_CR_HDLC 0x20 /* HDLC select. */
+ #define IRDA_CR_HP_MODE 0x10 /* HP mode (read only) */
+ #define IRDA_CR_SD_ST 0x08 /* SD/MODE State. */
+ #define IRDA_CR_FIR_SIN 0x04 /* FIR SIN Select. */
+ #define IRDA_CR_ITTX_0 0x02 /* SOUT State. IRTX force to 0 */
+ #define IRDA_CR_ITTX_1 0x03 /* SOUT State. IRTX force to 1 */
+
+/* Bank 3 */
+#define FIR_ID_VR 0x00 /* Alias 0, FIR ID Version Register (R/O) */
+#define FIR_MODULE_CR 0x01 /* Alias 1, FIR Module Control Register (R/W) */
+#define FIR_IO_BASE_HI 0x02 /* Alias 2, FIR Higher I/O Base Address Register (R/O) */
+#define FIR_IO_BASE_LO 0x03 /* Alias 3, FIR Lower I/O Base Address Register (R/O) */
+#define FIR_IRQ_CR 0x04 /* Alias 4, FIR IRQ Channel Register (R/O) */
+#define FIR_DMA_CR 0x05 /* Alias 5, FIR DMA Channel Register (R/O) */
+
+struct ali_chip {
+ char *name;
+ int cfg[2];
+ unsigned char entr1;
+ unsigned char entr2;
+ unsigned char cid_index;
+ unsigned char cid_value;
+ int (*probe)(struct ali_chip *chip, chipio_t *info);
+ int (*init)(struct ali_chip *chip, chipio_t *info);
+};
+typedef struct ali_chip ali_chip_t;
+
+
+/* DMA modes needed */
+#define DMA_TX_MODE 0x08 /* Mem to I/O, ++, demand. */
+#define DMA_RX_MODE 0x04 /* I/O to mem, ++, demand. */
+
+#define MAX_TX_WINDOW 7
+#define MAX_RX_WINDOW 7
+
+#define TX_FIFO_Threshold 8
+#define RX_FIFO_Threshold 1
+#define TX_DMA_Threshold 1
+#define RX_DMA_Threshold 1
+
+/* For storing entries in the status FIFO */
+
+struct st_fifo_entry {
+ int status;
+ int len;
+};
+
+struct st_fifo {
+ struct st_fifo_entry entries[MAX_RX_WINDOW];
+ int pending_bytes;
+ int head;
+ int tail;
+ int len;
+};
+
+struct frame_cb {
+ void *start; /* Start of frame in DMA mem */
+ int len; /* Length of frame in DMA mem */
+};
+
+struct tx_fifo {
+ struct frame_cb queue[MAX_TX_WINDOW]; /* Info about frames in queue */
+ int ptr; /* Currently being sent */
+ int len; /* Length of queue */
+ int free; /* Next free slot */
+ void *tail; /* Next free start in DMA mem */
+};
+
+/* Private data for each instance */
+struct ali_ircc_cb {
+
+ struct st_fifo st_fifo; /* Info about received frames */
+ struct tx_fifo tx_fifo; /* Info about frames to be transmitted */
+
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+
+ struct irlap_cb *irlap; /* The link layer we are bound to */
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ chipio_t io; /* IrDA controller information */
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+ dma_addr_t tx_buff_dma;
+ dma_addr_t rx_buff_dma;
+
+ __u8 ier; /* Interrupt enable register */
+
+ __u8 InterruptID; /* Interrupt ID */
+ __u8 BusStatus; /* Bus Status */
+ __u8 LineStatus; /* Line Status */
+
+ unsigned char rcvFramesOverflow;
+
+ ktime_t stamp;
+
+ spinlock_t lock; /* For serializing operations */
+
+ __u32 new_speed;
+ int index; /* Instance index */
+
+ unsigned char fifo_opti_buf;
+};
+
+static inline void switch_bank(int iobase, int bank)
+{
+ outb(bank, iobase+FIR_MCR);
+}
+
+#endif /* ALI_IRCC_H */
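
As an illustrative aside, switch_bank() above implements a bank-switched register file: writing BANK0..BANK3 to FIR_MCR selects which set of registers the low aliases 0x00-0x06 address. A toy in-memory model of that access pattern follows (all values and helper names are made up for illustration):

	#include <stdio.h>

	/* Toy in-memory model of the bank-switched register file: selecting
	 * a bank decides which registers the low aliases refer to. */
	static unsigned char banks[4][7];	/* BANK0..BANK3, aliases 0x00..0x06 */
	static int current_bank;

	static void switch_bank_sim(int bank)
	{
		current_bank = bank;
	}

	static unsigned char read_reg_sim(int alias)
	{
		return banks[current_bank][alias];
	}

	int main(void)
	{
		banks[0][0x05] = 0x07;	/* pretend BANK0 FIR_LSR reports 7 messages */
		banks[1][0x00] = 0x01;	/* pretend BANK1 FIR_CR has DMA enabled */

		switch_bank_sim(0);
		printf("BANK0 FIR_LSR = 0x%02x\n", read_reg_sim(0x05));
		switch_bank_sim(1);
		printf("BANK1 FIR_CR  = 0x%02x\n", read_reg_sim(0x00));
		return 0;
	}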
diff --git a/drivers/staging/irda/drivers/au1k_ir.c b/drivers/staging/irda/drivers/au1k_ir.c
new file mode 100644
index 000000000000..be4ea6aa57a9
--- /dev/null
+++ b/drivers/staging/irda/drivers/au1k_ir.c
@@ -0,0 +1,989 @@
+/*
+ * Alchemy Semi Au1000 IrDA driver
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ppopov@mvista.com or source@mvista.com
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irmod.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+#include <asm/mach-au1x00/au1000.h>
+
+/* registers */
+#define IR_RING_PTR_STATUS 0x00
+#define IR_RING_BASE_ADDR_H 0x04
+#define IR_RING_BASE_ADDR_L 0x08
+#define IR_RING_SIZE 0x0C
+#define IR_RING_PROMPT 0x10
+#define IR_RING_ADDR_CMPR 0x14
+#define IR_INT_CLEAR 0x18
+#define IR_CONFIG_1 0x20
+#define IR_SIR_FLAGS 0x24
+#define IR_STATUS 0x28
+#define IR_READ_PHY_CONFIG 0x2C
+#define IR_WRITE_PHY_CONFIG 0x30
+#define IR_MAX_PKT_LEN 0x34
+#define IR_RX_BYTE_CNT 0x38
+#define IR_CONFIG_2 0x3C
+#define IR_ENABLE 0x40
+
+/* Config1 */
+#define IR_RX_INVERT_LED (1 << 0)
+#define IR_TX_INVERT_LED (1 << 1)
+#define IR_ST (1 << 2)
+#define IR_SF (1 << 3)
+#define IR_SIR (1 << 4)
+#define IR_MIR (1 << 5)
+#define IR_FIR (1 << 6)
+#define IR_16CRC (1 << 7)
+#define IR_TD (1 << 8)
+#define IR_RX_ALL (1 << 9)
+#define IR_DMA_ENABLE (1 << 10)
+#define IR_RX_ENABLE (1 << 11)
+#define IR_TX_ENABLE (1 << 12)
+#define IR_LOOPBACK (1 << 14)
+#define IR_SIR_MODE (IR_SIR | IR_DMA_ENABLE | \
+ IR_RX_ALL | IR_RX_ENABLE | IR_SF | \
+ IR_16CRC)
+
+/* ir_status */
+#define IR_RX_STATUS (1 << 9)
+#define IR_TX_STATUS (1 << 10)
+#define IR_PHYEN (1 << 15)
+
+/* ir_write_phy_config */
+#define IR_BR(x) (((x) & 0x3f) << 10) /* baud rate */
+#define IR_PW(x) (((x) & 0x1f) << 5) /* pulse width */
+#define IR_P(x) ((x) & 0x1f) /* preamble bits */
+
+/* Config2 */
+#define IR_MODE_INV (1 << 0)
+#define IR_ONE_PIN (1 << 1)
+#define IR_PHYCLK_40MHZ (0 << 2)
+#define IR_PHYCLK_48MHZ (1 << 2)
+#define IR_PHYCLK_56MHZ (2 << 2)
+#define IR_PHYCLK_64MHZ (3 << 2)
+#define IR_DP (1 << 4)
+#define IR_DA (1 << 5)
+#define IR_FLT_HIGH (0 << 6)
+#define IR_FLT_MEDHI (1 << 6)
+#define IR_FLT_MEDLO (2 << 6)
+#define IR_FLT_LO (3 << 6)
+#define IR_IEN (1 << 8)
+
+/* ir_enable */
+#define IR_HC (1 << 3) /* divide SBUS clock by 2 */
+#define IR_CE (1 << 2) /* clock enable */
+#define IR_C (1 << 1) /* coherency bit */
+#define IR_BE (1 << 0) /* set in big endian mode */
+
+#define NUM_IR_DESC 64
+#define RING_SIZE_4 0x0
+#define RING_SIZE_16 0x3
+#define RING_SIZE_64 0xF
+#define MAX_NUM_IR_DESC 64
+#define MAX_BUF_SIZE 2048
+
+/* Ring descriptor flags */
+#define AU_OWN (1 << 7) /* tx,rx */
+#define IR_DIS_CRC (1 << 6) /* tx */
+#define IR_BAD_CRC (1 << 5) /* tx */
+#define IR_NEED_PULSE (1 << 4) /* tx */
+#define IR_FORCE_UNDER (1 << 3) /* tx */
+#define IR_DISABLE_TX (1 << 2) /* tx */
+#define IR_HW_UNDER (1 << 0) /* tx */
+#define IR_TX_ERROR (IR_DIS_CRC | IR_BAD_CRC | IR_HW_UNDER)
+
+#define IR_PHY_ERROR (1 << 6) /* rx */
+#define IR_CRC_ERROR (1 << 5) /* rx */
+#define IR_MAX_LEN (1 << 4) /* rx */
+#define IR_FIFO_OVER (1 << 3) /* rx */
+#define IR_SIR_ERROR (1 << 2) /* rx */
+#define IR_RX_ERROR (IR_PHY_ERROR | IR_CRC_ERROR | \
+ IR_MAX_LEN | IR_FIFO_OVER | IR_SIR_ERROR)
+
+struct db_dest {
+ struct db_dest *pnext;
+ volatile u32 *vaddr;
+ dma_addr_t dma_addr;
+};
+
+struct ring_dest {
+ u8 count_0; /* 7:0 */
+ u8 count_1; /* 12:8 */
+ u8 reserved;
+ u8 flags;
+ u8 addr_0; /* 7:0 */
+ u8 addr_1; /* 15:8 */
+ u8 addr_2; /* 23:16 */
+ u8 addr_3; /* 31:24 */
+};
+
+/* Private data for each instance */
+struct au1k_private {
+ void __iomem *iobase;
+ int irq_rx, irq_tx;
+
+ struct db_dest *pDBfree;
+ struct db_dest db[2 * NUM_IR_DESC];
+ volatile struct ring_dest *rx_ring[NUM_IR_DESC];
+ volatile struct ring_dest *tx_ring[NUM_IR_DESC];
+ struct db_dest *rx_db_inuse[NUM_IR_DESC];
+ struct db_dest *tx_db_inuse[NUM_IR_DESC];
+ u32 rx_head;
+ u32 tx_head;
+ u32 tx_tail;
+ u32 tx_full;
+
+ iobuff_t rx_buff;
+
+ struct net_device *netdev;
+ struct qos_info qos;
+ struct irlap_cb *irlap;
+
+ u8 open;
+ u32 speed;
+ u32 newspeed;
+
+ struct resource *ioarea;
+ struct au1k_irda_platform_data *platdata;
+ struct clk *irda_clk;
+};
+
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+
+static void au1k_irda_plat_set_phy_mode(struct au1k_private *p, int mode)
+{
+ if (p->platdata && p->platdata->set_phy_mode)
+ p->platdata->set_phy_mode(mode);
+}
+
+static inline unsigned long irda_read(struct au1k_private *p,
+ unsigned long ofs)
+{
+ /*
+ * IrDA peripheral bug. You have to read the register
+ * twice to get the right value.
+ */
+ (void)__raw_readl(p->iobase + ofs);
+ return __raw_readl(p->iobase + ofs);
+}
+
+static inline void irda_write(struct au1k_private *p, unsigned long ofs,
+ unsigned long val)
+{
+ __raw_writel(val, p->iobase + ofs);
+ wmb();
+}
+
+/*
+ * Buffer allocation/deallocation routines. The buffer descriptor returned
+ * has the virtual and dma address of a buffer suitable for
+ * both receive and transmit operations.
+ */
+static struct db_dest *GetFreeDB(struct au1k_private *aup)
+{
+ struct db_dest *db;
+ db = aup->pDBfree;
+
+ if (db)
+ aup->pDBfree = db->pnext;
+ return db;
+}
+
+/*
+ DMA memory allocation, derived from pci_alloc_consistent.
+ However, the Au1000 data cache is coherent (when so programmed),
+ therefore we return a KSEG0 address, not KSEG1.
+*/
+static void *dma_alloc(size_t size, dma_addr_t *dma_handle)
+{
+ void *ret;
+ int gfp = GFP_ATOMIC | GFP_DMA;
+
+ ret = (void *)__get_free_pages(gfp, get_order(size));
+
+ if (ret != NULL) {
+ memset(ret, 0, size);
+ *dma_handle = virt_to_bus(ret);
+ ret = (void *)KSEG0ADDR(ret);
+ }
+ return ret;
+}
+
+static void dma_free(void *vaddr, size_t size)
+{
+ vaddr = (void *)KSEG0ADDR(vaddr);
+ free_pages((unsigned long) vaddr, get_order(size));
+}
+
+
+static void setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
+{
+ int i;
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ aup->rx_ring[i] = (volatile struct ring_dest *)
+ (rx_base + sizeof(struct ring_dest) * i);
+ }
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ aup->tx_ring[i] = (volatile struct ring_dest *)
+ (tx_base + sizeof(struct ring_dest) * i);
+ }
+}
+
+static int au1k_irda_init_iobuf(iobuff_t *io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL);
+ if (io->head != NULL) {
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+ }
+ return io->head ? 0 : -ENOMEM;
+}
+
+/*
+ * Set the IrDA communications speed.
+ */
+static int au1k_irda_set_speed(struct net_device *dev, int speed)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ volatile struct ring_dest *ptxd;
+ unsigned long control;
+ int ret = 0, timeout = 10, i;
+
+ if (speed == aup->speed)
+ return ret;
+
+ /* disable PHY first */
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
+ irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);
+
+ /* disable RX/TX */
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) & ~(IR_RX_ENABLE | IR_TX_ENABLE));
+ msleep(20);
+ while (irda_read(aup, IR_STATUS) & (IR_RX_STATUS | IR_TX_STATUS)) {
+ msleep(20);
+ if (!timeout--) {
+ printk(KERN_ERR "%s: rx/tx disable timeout\n",
+ dev->name);
+ break;
+ }
+ }
+
+ /* disable DMA */
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) & ~IR_DMA_ENABLE);
+ msleep(20);
+
+ /* After we disable tx/rx, the index pointers go back to zero. */
+ aup->tx_head = aup->tx_tail = aup->rx_head = 0;
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ ptxd = aup->tx_ring[i];
+ ptxd->flags = 0;
+ ptxd->count_0 = 0;
+ ptxd->count_1 = 0;
+ }
+
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ ptxd = aup->rx_ring[i];
+ ptxd->count_0 = 0;
+ ptxd->count_1 = 0;
+ ptxd->flags = AU_OWN;
+ }
+
+ if (speed == 4000000)
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_FIR);
+ else
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);
+
+ switch (speed) {
+ case 9600:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(11) | IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 19200:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(5) | IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 38400:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(2) | IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 57600:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(1) | IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 115200:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 4000000:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_P(15));
+ irda_write(aup, IR_CONFIG_1, IR_FIR | IR_DMA_ENABLE |
+ IR_RX_ENABLE);
+ break;
+ default:
+ printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
+ ret = -EINVAL;
+ break;
+ }
+
+ aup->speed = speed;
+ irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) | IR_PHYEN);
+
+ control = irda_read(aup, IR_STATUS);
+ irda_write(aup, IR_RING_PROMPT, 0);
+
+ if (control & (1 << 14)) {
+ printk(KERN_ERR "%s: configuration error\n", dev->name);
+ } else {
+ if (control & (1 << 11))
+ printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
+ if (control & (1 << 12))
+ printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
+ if (control & (1 << 13))
+ printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
+ if (control & (1 << 10))
+ printk(KERN_DEBUG "%s TX enabled\n", dev->name);
+ if (control & (1 << 9))
+ printk(KERN_DEBUG "%s RX enabled\n", dev->name);
+ }
+
+ return ret;
+}
+
+static void update_rx_stats(struct net_device *dev, u32 status, u32 count)
+{
+ struct net_device_stats *ps = &dev->stats;
+
+ ps->rx_packets++;
+
+ if (status & IR_RX_ERROR) {
+ ps->rx_errors++;
+ if (status & (IR_PHY_ERROR | IR_FIFO_OVER))
+ ps->rx_missed_errors++;
+ if (status & IR_MAX_LEN)
+ ps->rx_length_errors++;
+ if (status & IR_CRC_ERROR)
+ ps->rx_crc_errors++;
+ } else
+ ps->rx_bytes += count;
+}
+
+static void update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
+{
+ struct net_device_stats *ps = &dev->stats;
+
+ ps->tx_packets++;
+ ps->tx_bytes += pkt_len;
+
+ if (status & IR_TX_ERROR) {
+ ps->tx_errors++;
+ ps->tx_aborted_errors++;
+ }
+}
+
+static void au1k_tx_ack(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ volatile struct ring_dest *ptxd;
+
+ ptxd = aup->tx_ring[aup->tx_tail];
+ while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
+ update_tx_stats(dev, ptxd->flags,
+ (ptxd->count_1 << 8) | ptxd->count_0);
+ ptxd->count_0 = 0;
+ ptxd->count_1 = 0;
+ wmb();
+ aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
+ ptxd = aup->tx_ring[aup->tx_tail];
+
+ if (aup->tx_full) {
+ aup->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ }
+
+ if (aup->tx_tail == aup->tx_head) {
+ if (aup->newspeed) {
+ au1k_irda_set_speed(dev, aup->newspeed);
+ aup->newspeed = 0;
+ } else {
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) & ~IR_TX_ENABLE);
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) | IR_RX_ENABLE);
+ irda_write(aup, IR_RING_PROMPT, 0);
+ }
+ }
+}
+
+static int au1k_irda_rx(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ volatile struct ring_dest *prxd;
+ struct sk_buff *skb;
+ struct db_dest *pDB;
+ u32 flags, count;
+
+ prxd = aup->rx_ring[aup->rx_head];
+ flags = prxd->flags;
+
+ while (!(flags & AU_OWN)) {
+ pDB = aup->rx_db_inuse[aup->rx_head];
+ count = (prxd->count_1 << 8) | prxd->count_0;
+ if (!(flags & IR_RX_ERROR)) {
+ /* good frame */
+ update_rx_stats(dev, flags, count);
+ skb = alloc_skb(count + 1, GFP_ATOMIC);
+ if (skb == NULL) {
+ dev->stats.rx_dropped++;
+ continue;
+ }
+ skb_reserve(skb, 1);
+ if (aup->speed == 4000000)
+ skb_put(skb, count);
+ else
+ skb_put(skb, count - 2);
+ skb_copy_to_linear_data(skb, (void *)pDB->vaddr,
+ count - 2);
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ prxd->count_0 = 0;
+ prxd->count_1 = 0;
+ }
+ prxd->flags |= AU_OWN;
+ aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
+ irda_write(aup, IR_RING_PROMPT, 0);
+
+ /* next descriptor */
+ prxd = aup->rx_ring[aup->rx_head];
+ flags = prxd->flags;
+
+ }
+ return 0;
+}
+
+static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct au1k_private *aup = netdev_priv(dev);
+
+ irda_write(aup, IR_INT_CLEAR, 0); /* ack irda interrupts */
+
+ au1k_irda_rx(dev);
+ au1k_tx_ack(dev);
+
+ return IRQ_HANDLED;
+}
+
+static int au1k_init(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ u32 enable, ring_address, phyck;
+ struct clk *c;
+ int i;
+
+ c = clk_get(NULL, "irda_clk");
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ i = clk_prepare_enable(c);
+ if (i) {
+ clk_put(c);
+ return i;
+ }
+
+ switch (clk_get_rate(c)) {
+ case 40000000:
+ phyck = IR_PHYCLK_40MHZ;
+ break;
+ case 48000000:
+ phyck = IR_PHYCLK_48MHZ;
+ break;
+ case 56000000:
+ phyck = IR_PHYCLK_56MHZ;
+ break;
+ case 64000000:
+ phyck = IR_PHYCLK_64MHZ;
+ break;
+ default:
+ clk_disable_unprepare(c);
+ clk_put(c);
+ return -EINVAL;
+ }
+ aup->irda_clk = c;
+
+ enable = IR_HC | IR_CE | IR_C;
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+ enable |= IR_BE;
+#endif
+ aup->tx_head = 0;
+ aup->tx_tail = 0;
+ aup->rx_head = 0;
+
+ for (i = 0; i < NUM_IR_DESC; i++)
+ aup->rx_ring[i]->flags = AU_OWN;
+
+ irda_write(aup, IR_ENABLE, enable);
+ msleep(20);
+
+ /* disable PHY */
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
+ irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);
+ msleep(20);
+
+ irda_write(aup, IR_MAX_PKT_LEN, MAX_BUF_SIZE);
+
+ ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]);
+ irda_write(aup, IR_RING_BASE_ADDR_H, ring_address >> 26);
+ irda_write(aup, IR_RING_BASE_ADDR_L, (ring_address >> 10) & 0xffff);
+
+ irda_write(aup, IR_RING_SIZE,
+ (RING_SIZE_64 << 8) | (RING_SIZE_64 << 12));
+
+ irda_write(aup, IR_CONFIG_2, phyck | IR_ONE_PIN);
+ irda_write(aup, IR_RING_ADDR_CMPR, 0);
+
+ au1k_irda_set_speed(dev, 9600);
+ return 0;
+}
+
+static int au1k_irda_start(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ char hwname[32];
+ int retval;
+
+ retval = au1k_init(dev);
+ if (retval) {
+ printk(KERN_ERR "%s: error in au1k_init\n", dev->name);
+ return retval;
+ }
+
+ retval = request_irq(aup->irq_tx, &au1k_irda_interrupt, 0,
+ dev->name, dev);
+ if (retval) {
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ dev->name, dev->irq);
+ return retval;
+ }
+ retval = request_irq(aup->irq_rx, &au1k_irda_interrupt, 0,
+ dev->name, dev);
+ if (retval) {
+ free_irq(aup->irq_tx, dev);
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ dev->name, dev->irq);
+ return retval;
+ }
+
+ /* Give self a hardware name */
+ sprintf(hwname, "Au1000 SIR/FIR");
+ aup->irlap = irlap_open(dev, &aup->qos, hwname);
+ netif_start_queue(dev);
+
+ /* int enable */
+ irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) | IR_IEN);
+
+ /* power up */
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);
+
+ return 0;
+}
+
+static int au1k_irda_stop(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
+
+ /* disable interrupts */
+ irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) & ~IR_IEN);
+ irda_write(aup, IR_CONFIG_1, 0);
+ irda_write(aup, IR_ENABLE, 0); /* disable clock */
+
+ if (aup->irlap) {
+ irlap_close(aup->irlap);
+ aup->irlap = NULL;
+ }
+
+ netif_stop_queue(dev);
+
+ /* disable the interrupt */
+ free_irq(aup->irq_tx, dev);
+ free_irq(aup->irq_rx, dev);
+
+ clk_disable_unprepare(aup->irda_clk);
+ clk_put(aup->irda_clk);
+
+ return 0;
+}
+
+/*
+ * Au1000 transmit routine.
+ */
+static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ int speed = irda_get_next_speed(skb);
+ volatile struct ring_dest *ptxd;
+ struct db_dest *pDB;
+ u32 len, flags;
+
+ if (speed != aup->speed && speed != -1)
+ aup->newspeed = speed;
+
+ if ((skb->len == 0) && (aup->newspeed)) {
+ if (aup->tx_tail == aup->tx_head) {
+ au1k_irda_set_speed(dev, speed);
+ aup->newspeed = 0;
+ }
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ ptxd = aup->tx_ring[aup->tx_head];
+ flags = ptxd->flags;
+
+ if (flags & AU_OWN) {
+ printk(KERN_DEBUG "%s: tx_full\n", dev->name);
+ netif_stop_queue(dev);
+ aup->tx_full = 1;
+ return 1;
+ } else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
+ printk(KERN_DEBUG "%s: tx_full\n", dev->name);
+ netif_stop_queue(dev);
+ aup->tx_full = 1;
+ return 1;
+ }
+
+ pDB = aup->tx_db_inuse[aup->tx_head];
+
+#if 0
+ if (irda_read(aup, IR_RX_BYTE_CNT) != 0) {
+ printk(KERN_DEBUG "tx warning: rx byte cnt %x\n",
+ irda_read(aup, IR_RX_BYTE_CNT));
+ }
+#endif
+
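+	/* The descriptor carries the frame length as two bytes:
+	 * count_0 holds bits 7:0 and count_1 holds bits 15:8.
+	 */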
+ if (aup->speed == 4000000) {
+ /* FIR */
+ skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
+ ptxd->count_0 = skb->len & 0xff;
+ ptxd->count_1 = (skb->len >> 8) & 0xff;
+ } else {
+ /* SIR */
+ len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE);
+ ptxd->count_0 = len & 0xff;
+ ptxd->count_1 = (len >> 8) & 0xff;
+ ptxd->flags |= IR_DIS_CRC;
+ }
+ ptxd->flags |= AU_OWN;
+ wmb();
+
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) | IR_TX_ENABLE);
+ irda_write(aup, IR_RING_PROMPT, 0);
+
+ dev_kfree_skb(skb);
+ aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
+ return NETDEV_TX_OK;
+}
+
+/*
+ * The Tx ring has been full longer than the watchdog timeout
+ * value; the transmitter is probably hung.
+ */
+static void au1k_tx_timeout(struct net_device *dev)
+{
+ u32 speed;
+ struct au1k_private *aup = netdev_priv(dev);
+
+ printk(KERN_ERR "%s: tx timeout\n", dev->name);
+ speed = aup->speed;
+ aup->speed = 0;
+ au1k_irda_set_speed(dev, speed);
+ aup->tx_full = 0;
+ netif_wake_queue(dev);
+}
+
+static int au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
+{
+ struct if_irda_req *rq = (struct if_irda_req *)ifreq;
+ struct au1k_private *aup = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH:
+ if (capable(CAP_NET_ADMIN)) {
+ /*
+ * We are unable to set the speed if the
+ * device is not running.
+ */
+ if (aup->open)
+ ret = au1k_irda_set_speed(dev,
+ rq->ifr_baudrate);
+ else {
+ printk(KERN_ERR "%s ioctl: !netif_running\n",
+ dev->name);
+ ret = 0;
+ }
+ }
+ break;
+
+ case SIOCSMEDIABUSY:
+ ret = -EPERM;
+ if (capable(CAP_NET_ADMIN)) {
+ irda_device_set_media_busy(dev, TRUE);
+ ret = 0;
+ }
+ break;
+
+ case SIOCGRECEIVING:
+ rq->ifr_receiving = 0;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static const struct net_device_ops au1k_irda_netdev_ops = {
+ .ndo_open = au1k_irda_start,
+ .ndo_stop = au1k_irda_stop,
+ .ndo_start_xmit = au1k_irda_hard_xmit,
+ .ndo_tx_timeout = au1k_tx_timeout,
+ .ndo_do_ioctl = au1k_irda_ioctl,
+};
+
+static int au1k_irda_net_init(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ struct db_dest *pDB, *pDBfree;
+ int i, err, retval = 0;
+ dma_addr_t temp;
+
+ err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
+ if (err)
+ goto out1;
+
+ dev->netdev_ops = &au1k_irda_netdev_ops;
+
+ irda_init_max_qos_capabilies(&aup->qos);
+
+	/* The only value we must override is the baudrate */
+ aup->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 |
+ IR_57600 | IR_115200 | IR_576000 | (IR_4000000 << 8);
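+	/* (the FIR rate bit is shifted into the second byte of the 16-bit
+	 * baud_rate field, hence the << 8 above)
+	 */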
+
+ aup->qos.min_turn_time.bits = qos_mtt_bits;
+ irda_qos_bits_to_value(&aup->qos);
+
+ retval = -ENOMEM;
+
+ /* Tx ring follows rx ring + 512 bytes */
+ /* we need a 1k aligned buffer */
+ aup->rx_ring[0] = (struct ring_dest *)
+ dma_alloc(2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)),
+ &temp);
+ if (!aup->rx_ring[0])
+ goto out2;
+
+ /* allocate the data buffers */
+ aup->db[0].vaddr =
+ dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
+ if (!aup->db[0].vaddr)
+ goto out3;
+
+ setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);
+
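+	/* Carve the single DMA allocation into 2 * NUM_IR_DESC buffers of
+	 * MAX_BUF_SIZE bytes each and chain them into the pDBfree list.
+	 */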
+ pDBfree = NULL;
+ pDB = aup->db;
+ for (i = 0; i < (2 * NUM_IR_DESC); i++) {
+ pDB->pnext = pDBfree;
+ pDBfree = pDB;
+ pDB->vaddr =
+ (u32 *)((unsigned)aup->db[0].vaddr + (MAX_BUF_SIZE * i));
+ pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
+ pDB++;
+ }
+ aup->pDBfree = pDBfree;
+
+ /* attach a data buffer to each descriptor */
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ pDB = GetFreeDB(aup);
+ if (!pDB)
+ goto out3;
+ aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
+ aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
+ aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
+ aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
+ aup->rx_db_inuse[i] = pDB;
+ }
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ pDB = GetFreeDB(aup);
+ if (!pDB)
+ goto out3;
+ aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
+ aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
+ aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
+ aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
+ aup->tx_ring[i]->count_0 = 0;
+ aup->tx_ring[i]->count_1 = 0;
+ aup->tx_ring[i]->flags = 0;
+ aup->tx_db_inuse[i] = pDB;
+ }
+
+ return 0;
+
+out3:
+ dma_free((void *)aup->rx_ring[0],
+ 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
+out2:
+ kfree(aup->rx_buff.head);
+out1:
+ printk(KERN_ERR "au1k_irda_net_init() failed. Returns %d\n", retval);
+ return retval;
+}
+
+static int au1k_irda_probe(struct platform_device *pdev)
+{
+ struct au1k_private *aup;
+ struct net_device *dev;
+ struct resource *r;
+ struct clk *c;
+ int err;
+
+ dev = alloc_irdadev(sizeof(struct au1k_private));
+ if (!dev)
+ return -ENOMEM;
+
+ aup = netdev_priv(dev);
+
+ aup->platdata = pdev->dev.platform_data;
+
+ err = -EINVAL;
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!r)
+ goto out;
+
+ aup->irq_tx = r->start;
+
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (!r)
+ goto out;
+
+ aup->irq_rx = r->start;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ goto out;
+
+ err = -EBUSY;
+ aup->ioarea = request_mem_region(r->start, resource_size(r),
+ pdev->name);
+ if (!aup->ioarea)
+ goto out;
+
+ /* bail out early if clock doesn't exist */
+ c = clk_get(NULL, "irda_clk");
+ if (IS_ERR(c)) {
+ err = PTR_ERR(c);
+ goto out;
+ }
+ clk_put(c);
+
+ aup->iobase = ioremap_nocache(r->start, resource_size(r));
+ if (!aup->iobase)
+ goto out2;
+
+ dev->irq = aup->irq_rx;
+
+ err = au1k_irda_net_init(dev);
+ if (err)
+ goto out3;
+ err = register_netdev(dev);
+ if (err)
+ goto out4;
+
+ platform_set_drvdata(pdev, dev);
+
+ printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
+ return 0;
+
+out4:
+ dma_free((void *)aup->db[0].vaddr,
+ MAX_BUF_SIZE * 2 * NUM_IR_DESC);
+ dma_free((void *)aup->rx_ring[0],
+ 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
+ kfree(aup->rx_buff.head);
+out3:
+ iounmap(aup->iobase);
+out2:
+ release_resource(aup->ioarea);
+ kfree(aup->ioarea);
+out:
+ free_netdev(dev);
+ return err;
+}
+
+static int au1k_irda_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct au1k_private *aup = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ dma_free((void *)aup->db[0].vaddr,
+ MAX_BUF_SIZE * 2 * NUM_IR_DESC);
+ dma_free((void *)aup->rx_ring[0],
+ 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
+ kfree(aup->rx_buff.head);
+
+ iounmap(aup->iobase);
+ release_resource(aup->ioarea);
+ kfree(aup->ioarea);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct platform_driver au1k_irda_driver = {
+ .driver = {
+ .name = "au1000-irda",
+ },
+ .probe = au1k_irda_probe,
+ .remove = au1k_irda_remove,
+};
+
+module_platform_driver(au1k_irda_driver);
+
+MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
+MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
diff --git a/drivers/staging/irda/drivers/bfin_sir.c b/drivers/staging/irda/drivers/bfin_sir.c
new file mode 100644
index 000000000000..3151b580dbd6
--- /dev/null
+++ b/drivers/staging/irda/drivers/bfin_sir.c
@@ -0,0 +1,817 @@
+/*
+ * Blackfin Infra-red Driver
+ *
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ *
+ */
+#include "bfin_sir.h"
+
+#ifdef CONFIG_SIR_BFIN_DMA
+#define DMA_SIR_RX_XCNT 10
+#define DMA_SIR_RX_YCNT (PAGE_SIZE / DMA_SIR_RX_XCNT)
+#define DMA_SIR_RX_FLUSH_JIFS (HZ * 4 / 250)
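+/* The RX DMA is set up as a 2D transfer: rows of DMA_SIR_RX_XCNT bytes,
+ * DMA_SIR_RX_YCNT rows per PAGE_SIZE buffer. An interrupt is raised per
+ * completed row and the flush timer drains any partially filled row.
+ */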
+#endif
+
+#if ANOMALY_05000447
+static int max_rate = 57600;
+#else
+static int max_rate = 115200;
+#endif
+
+static void turnaround_delay(int mtt)
+{
+ long ticks;
+
+ mtt = mtt < 10000 ? 10000 : mtt;
+ ticks = 1 + mtt / (USEC_PER_SEC / HZ);
+ schedule_timeout_uninterruptible(ticks);
+}
+
+static void bfin_sir_init_ports(struct bfin_sir_port *sp, struct platform_device *pdev)
+{
+ int i;
+ struct resource *res;
+
+ for (i = 0; i < pdev->num_resources; i++) {
+ res = &pdev->resource[i];
+ switch (res->flags) {
+ case IORESOURCE_MEM:
+ sp->membase = (void __iomem *)res->start;
+ break;
+ case IORESOURCE_IRQ:
+ sp->irq = res->start;
+ break;
+ case IORESOURCE_DMA:
+ sp->rx_dma_channel = res->start;
+ sp->tx_dma_channel = res->end;
+ break;
+ default:
+ break;
+ }
+ }
+
+ sp->clk = get_sclk();
+#ifdef CONFIG_SIR_BFIN_DMA
+ sp->tx_done = 1;
+ init_timer(&(sp->rx_dma_timer));
+#endif
+}
+
+static void bfin_sir_stop_tx(struct bfin_sir_port *port)
+{
+#ifdef CONFIG_SIR_BFIN_DMA
+ disable_dma(port->tx_dma_channel);
+#endif
+
+ while (!(UART_GET_LSR(port) & THRE)) {
+ cpu_relax();
+ continue;
+ }
+
+ UART_CLEAR_IER(port, ETBEI);
+}
+
+static void bfin_sir_enable_tx(struct bfin_sir_port *port)
+{
+ UART_SET_IER(port, ETBEI);
+}
+
+static void bfin_sir_stop_rx(struct bfin_sir_port *port)
+{
+ UART_CLEAR_IER(port, ERBFI);
+}
+
+static void bfin_sir_enable_rx(struct bfin_sir_port *port)
+{
+ UART_SET_IER(port, ERBFI);
+}
+
+static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
+{
+ int ret = -EINVAL;
+ unsigned int quot;
+ unsigned short val, lsr, lcr;
+ static int utime;
+ int count = 10;
+
+ lcr = WLS(8);
+
+ switch (speed) {
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+
+ /*
+ * IRDA is not affected by anomaly 05000230, so there is no
+		 * need to tweak the divisor like the UART driver (which will
+ * slightly speed up the baud rate on us).
+ */
+ quot = (port->clk + (8 * speed)) / (16 * speed);
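+		/* i.e. clk / (16 * speed), rounded to the nearest integer */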
+
+ do {
+ udelay(utime);
+ lsr = UART_GET_LSR(port);
+ } while (!(lsr & TEMT) && count--);
+
+		/* The number of microseconds needed to transmit 1 bit */
+ utime = 1000000 / speed + 1;
+
+ /* Clear UCEN bit to reset the UART state machine
+ * and control registers
+ */
+ val = UART_GET_GCTL(port);
+ val &= ~UCEN;
+ UART_PUT_GCTL(port, val);
+
+ /* Set DLAB in LCR to Access THR RBR IER */
+ UART_SET_DLAB(port);
+ SSYNC();
+
+ UART_PUT_DLL(port, quot & 0xFF);
+ UART_PUT_DLH(port, (quot >> 8) & 0xFF);
+ SSYNC();
+
+ /* Clear DLAB in LCR */
+ UART_CLEAR_DLAB(port);
+ SSYNC();
+
+ UART_PUT_LCR(port, lcr);
+
+ val = UART_GET_GCTL(port);
+ val |= UCEN;
+ UART_PUT_GCTL(port, val);
+
+ ret = 0;
+ break;
+ default:
+ printk(KERN_WARNING "bfin_sir: Invalid speed %d\n", speed);
+ break;
+ }
+
+ val = UART_GET_GCTL(port);
+	/* If RPOLC is not set we cannot catch the receive interrupt;
+	 * this depends on the HW layout and the IR transceiver.
+ */
+ val |= UMOD_IRDA | RPOLC;
+ UART_PUT_GCTL(port, val);
+ return ret;
+}
+
+static int bfin_sir_is_receiving(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ if (!(UART_GET_IER(port) & ERBFI))
+ return 0;
+ return self->rx_buff.state != OUTSIDE_FRAME;
+}
+
+#ifdef CONFIG_SIR_BFIN_PIO
+static void bfin_sir_tx_chars(struct net_device *dev)
+{
+ unsigned int chr;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ if (self->tx_buff.len != 0) {
+ chr = *(self->tx_buff.data);
+ UART_PUT_CHAR(port, chr);
+ self->tx_buff.data++;
+ self->tx_buff.len--;
+ } else {
+ self->stats.tx_packets++;
+ self->stats.tx_bytes += self->tx_buff.data - self->tx_buff.head;
+ if (self->newspeed) {
+ bfin_sir_set_speed(port, self->newspeed);
+ self->speed = self->newspeed;
+ self->newspeed = 0;
+ }
+ bfin_sir_stop_tx(port);
+ bfin_sir_enable_rx(port);
+ /* I'm hungry! */
+ netif_wake_queue(dev);
+ }
+}
+
+static void bfin_sir_rx_chars(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ unsigned char ch;
+
+ UART_CLEAR_LSR(port);
+ ch = UART_GET_CHAR(port);
+ async_unwrap_char(dev, &self->stats, &self->rx_buff, ch);
+}
+
+static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ spin_lock(&self->lock);
+ while ((UART_GET_LSR(port) & DR))
+ bfin_sir_rx_chars(dev);
+ spin_unlock(&self->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bfin_sir_tx_int(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ spin_lock(&self->lock);
+ if (UART_GET_LSR(port) & THRE)
+ bfin_sir_tx_chars(dev);
+ spin_unlock(&self->lock);
+
+ return IRQ_HANDLED;
+}
+#endif /* CONFIG_SIR_BFIN_PIO */
+
+#ifdef CONFIG_SIR_BFIN_DMA
+static void bfin_sir_dma_tx_chars(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ if (!port->tx_done)
+ return;
+ port->tx_done = 0;
+
+ if (self->tx_buff.len == 0) {
+ self->stats.tx_packets++;
+ if (self->newspeed) {
+ bfin_sir_set_speed(port, self->newspeed);
+ self->speed = self->newspeed;
+ self->newspeed = 0;
+ }
+ bfin_sir_enable_rx(port);
+ port->tx_done = 1;
+ netif_wake_queue(dev);
+ return;
+ }
+
+ blackfin_dcache_flush_range((unsigned long)(self->tx_buff.data),
+ (unsigned long)(self->tx_buff.data+self->tx_buff.len));
+ set_dma_config(port->tx_dma_channel,
+ set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
+ INTR_ON_BUF, DIMENSION_LINEAR, DATA_SIZE_8,
+ DMA_SYNC_RESTART));
+ set_dma_start_addr(port->tx_dma_channel,
+ (unsigned long)(self->tx_buff.data));
+ set_dma_x_count(port->tx_dma_channel, self->tx_buff.len);
+ set_dma_x_modify(port->tx_dma_channel, 1);
+ enable_dma(port->tx_dma_channel);
+}
+
+static irqreturn_t bfin_sir_dma_tx_int(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+
+ spin_lock(&self->lock);
+ if (!(get_dma_curr_irqstat(port->tx_dma_channel) & DMA_RUN)) {
+ clear_dma_irqstat(port->tx_dma_channel);
+ bfin_sir_stop_tx(port);
+
+ self->stats.tx_packets++;
+ self->stats.tx_bytes += self->tx_buff.len;
+ self->tx_buff.len = 0;
+ if (self->newspeed) {
+ bfin_sir_set_speed(port, self->newspeed);
+ self->speed = self->newspeed;
+ self->newspeed = 0;
+ }
+ bfin_sir_enable_rx(port);
+ /* I'm hungry! */
+ netif_wake_queue(dev);
+ port->tx_done = 1;
+ }
+ spin_unlock(&self->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void bfin_sir_dma_rx_chars(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ int i;
+
+ UART_CLEAR_LSR(port);
+
+ for (i = port->rx_dma_buf.head; i < port->rx_dma_buf.tail; i++)
+ async_unwrap_char(dev, &self->stats, &self->rx_buff, port->rx_dma_buf.buf[i]);
+}
+
+void bfin_sir_rx_dma_timeout(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ int x_pos, pos;
+ unsigned long flags;
+
+ spin_lock_irqsave(&self->lock, flags);
+ x_pos = DMA_SIR_RX_XCNT - get_dma_curr_xcount(port->rx_dma_channel);
+ if (x_pos == DMA_SIR_RX_XCNT)
+ x_pos = 0;
+
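+	/* pos = bytes the DMA engine has written into the circular buffer
+	 * so far; only flush what advanced past the processed tail.
+	 */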
+ pos = port->rx_dma_nrows * DMA_SIR_RX_XCNT + x_pos;
+
+ if (pos > port->rx_dma_buf.tail) {
+ port->rx_dma_buf.tail = pos;
+ bfin_sir_dma_rx_chars(dev);
+ port->rx_dma_buf.head = port->rx_dma_buf.tail;
+ }
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+
+static irqreturn_t bfin_sir_dma_rx_int(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ unsigned short irqstat;
+
+ spin_lock(&self->lock);
+
+ port->rx_dma_nrows++;
+ port->rx_dma_buf.tail = DMA_SIR_RX_XCNT * port->rx_dma_nrows;
+ bfin_sir_dma_rx_chars(dev);
+ if (port->rx_dma_nrows >= DMA_SIR_RX_YCNT) {
+ port->rx_dma_nrows = 0;
+ port->rx_dma_buf.tail = 0;
+ }
+ port->rx_dma_buf.head = port->rx_dma_buf.tail;
+
+ irqstat = get_dma_curr_irqstat(port->rx_dma_channel);
+ clear_dma_irqstat(port->rx_dma_channel);
+ spin_unlock(&self->lock);
+
+ mod_timer(&port->rx_dma_timer, jiffies + DMA_SIR_RX_FLUSH_JIFS);
+ return IRQ_HANDLED;
+}
+#endif /* CONFIG_SIR_BFIN_DMA */
+
+static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
+{
+#ifdef CONFIG_SIR_BFIN_DMA
+ dma_addr_t dma_handle;
+#endif /* CONFIG_SIR_BFIN_DMA */
+
+ if (request_dma(port->rx_dma_channel, "BFIN_UART_RX") < 0) {
+ dev_warn(&dev->dev, "Unable to attach SIR RX DMA channel\n");
+ return -EBUSY;
+ }
+
+ if (request_dma(port->tx_dma_channel, "BFIN_UART_TX") < 0) {
+ dev_warn(&dev->dev, "Unable to attach SIR TX DMA channel\n");
+ free_dma(port->rx_dma_channel);
+ return -EBUSY;
+ }
+
+#ifdef CONFIG_SIR_BFIN_DMA
+
+ set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev);
+ set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev);
+
+ port->rx_dma_buf.buf = dma_alloc_coherent(NULL, PAGE_SIZE,
+ &dma_handle, GFP_DMA);
+ port->rx_dma_buf.head = 0;
+ port->rx_dma_buf.tail = 0;
+ port->rx_dma_nrows = 0;
+
+ set_dma_config(port->rx_dma_channel,
+ set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
+ INTR_ON_ROW, DIMENSION_2D,
+ DATA_SIZE_8, DMA_SYNC_RESTART));
+ set_dma_x_count(port->rx_dma_channel, DMA_SIR_RX_XCNT);
+ set_dma_x_modify(port->rx_dma_channel, 1);
+ set_dma_y_count(port->rx_dma_channel, DMA_SIR_RX_YCNT);
+ set_dma_y_modify(port->rx_dma_channel, 1);
+ set_dma_start_addr(port->rx_dma_channel, (unsigned long)port->rx_dma_buf.buf);
+ enable_dma(port->rx_dma_channel);
+
+ port->rx_dma_timer.data = (unsigned long)(dev);
+ port->rx_dma_timer.function = (void *)bfin_sir_rx_dma_timeout;
+
+#else
+
+ if (request_irq(port->irq, bfin_sir_rx_int, 0, "BFIN_SIR_RX", dev)) {
+ dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
+ return -EBUSY;
+ }
+
+ if (request_irq(port->irq+1, bfin_sir_tx_int, 0, "BFIN_SIR_TX", dev)) {
+ dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
+ free_irq(port->irq, dev);
+ return -EBUSY;
+ }
+#endif
+
+ return 0;
+}
+
+static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev)
+{
+ unsigned short val;
+
+ bfin_sir_stop_rx(port);
+
+ val = UART_GET_GCTL(port);
+ val &= ~(UCEN | UMOD_MASK | RPOLC);
+ UART_PUT_GCTL(port, val);
+
+#ifdef CONFIG_SIR_BFIN_DMA
+ disable_dma(port->tx_dma_channel);
+ disable_dma(port->rx_dma_channel);
+ del_timer(&(port->rx_dma_timer));
+ dma_free_coherent(NULL, PAGE_SIZE, port->rx_dma_buf.buf, 0);
+#else
+ free_irq(port->irq+1, dev);
+ free_irq(port->irq, dev);
+#endif
+ free_dma(port->tx_dma_channel);
+ free_dma(port->rx_dma_channel);
+}
+
+#ifdef CONFIG_PM
+static int bfin_sir_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct bfin_sir_port *sir_port;
+ struct net_device *dev;
+ struct bfin_sir_self *self;
+
+ sir_port = platform_get_drvdata(pdev);
+ if (!sir_port)
+ return 0;
+
+ dev = sir_port->dev;
+ self = netdev_priv(dev);
+ if (self->open) {
+ flush_work(&self->work);
+ bfin_sir_shutdown(self->sir_port, dev);
+ netif_device_detach(dev);
+ }
+
+ return 0;
+}
+static int bfin_sir_resume(struct platform_device *pdev)
+{
+ struct bfin_sir_port *sir_port;
+ struct net_device *dev;
+ struct bfin_sir_self *self;
+ struct bfin_sir_port *port;
+
+ sir_port = platform_get_drvdata(pdev);
+ if (!sir_port)
+ return 0;
+
+ dev = sir_port->dev;
+ self = netdev_priv(dev);
+ port = self->sir_port;
+ if (self->open) {
+ if (self->newspeed) {
+ self->speed = self->newspeed;
+ self->newspeed = 0;
+ }
+ bfin_sir_startup(port, dev);
+ bfin_sir_set_speed(port, 9600);
+ bfin_sir_enable_rx(port);
+ netif_device_attach(dev);
+ }
+ return 0;
+}
+#else
+#define bfin_sir_suspend NULL
+#define bfin_sir_resume NULL
+#endif
+
+static void bfin_sir_send_work(struct work_struct *work)
+{
+ struct bfin_sir_self *self = container_of(work, struct bfin_sir_self, work);
+ struct net_device *dev = self->sir_port->dev;
+ struct bfin_sir_port *port = self->sir_port;
+ unsigned short val;
+ int tx_cnt = 10;
+
+ while (bfin_sir_is_receiving(dev) && --tx_cnt)
+ turnaround_delay(self->mtt);
+
+ bfin_sir_stop_rx(port);
+
+	/* To avoid losing the RX interrupt, reset the IR function before
+	 * sending data. Setting the speed would also work, since that
+	 * resets the whole UART.
+ */
+ val = UART_GET_GCTL(port);
+ val &= ~(UMOD_MASK | RPOLC);
+ UART_PUT_GCTL(port, val);
+ SSYNC();
+ val |= UMOD_IRDA | RPOLC;
+ UART_PUT_GCTL(port, val);
+ SSYNC();
+ /* bfin_sir_set_speed(port, self->speed); */
+
+#ifdef CONFIG_SIR_BFIN_DMA
+ bfin_sir_dma_tx_chars(dev);
+#endif
+ bfin_sir_enable_tx(port);
+ netif_trans_update(dev);
+}
+
+static int bfin_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ int speed = irda_get_next_speed(skb);
+
+ netif_stop_queue(dev);
+
+ self->mtt = irda_get_mtt(skb);
+
+ if (speed != self->speed && speed != -1)
+ self->newspeed = speed;
+
+ self->tx_buff.data = self->tx_buff.head;
+ if (skb->len == 0)
+ self->tx_buff.len = 0;
+ else
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, self->tx_buff.truesize);
+
+ schedule_work(&self->work);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int bfin_sir_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
+{
+ struct if_irda_req *rq = (struct if_irda_req *)ifreq;
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ int ret = 0;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH:
+ if (capable(CAP_NET_ADMIN)) {
+ if (self->open) {
+ ret = bfin_sir_set_speed(port, rq->ifr_baudrate);
+ bfin_sir_enable_rx(port);
+ } else {
+ dev_warn(&dev->dev, "SIOCSBANDWIDTH: !netif_running\n");
+ ret = 0;
+ }
+ }
+ break;
+
+ case SIOCSMEDIABUSY:
+ ret = -EPERM;
+ if (capable(CAP_NET_ADMIN)) {
+ irda_device_set_media_busy(dev, TRUE);
+ ret = 0;
+ }
+ break;
+
+ case SIOCGRECEIVING:
+ rq->ifr_receiving = bfin_sir_is_receiving(dev);
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static struct net_device_stats *bfin_sir_stats(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+
+ return &self->stats;
+}
+
+static int bfin_sir_open(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+ struct bfin_sir_port *port = self->sir_port;
+ int err;
+
+ self->newspeed = 0;
+ self->speed = 9600;
+
+ spin_lock_init(&self->lock);
+
+ err = bfin_sir_startup(port, dev);
+ if (err)
+ goto err_startup;
+
+ bfin_sir_set_speed(port, 9600);
+
+ self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME);
+ if (!self->irlap) {
+ err = -ENOMEM;
+ goto err_irlap;
+ }
+
+ INIT_WORK(&self->work, bfin_sir_send_work);
+
+ /*
+ * Now enable the interrupt then start the queue
+ */
+ self->open = 1;
+ bfin_sir_enable_rx(port);
+
+ netif_start_queue(dev);
+
+ return 0;
+
+err_irlap:
+ self->open = 0;
+ bfin_sir_shutdown(port, dev);
+err_startup:
+ return err;
+}
+
+static int bfin_sir_stop(struct net_device *dev)
+{
+ struct bfin_sir_self *self = netdev_priv(dev);
+
+ flush_work(&self->work);
+ bfin_sir_shutdown(self->sir_port, dev);
+
+ if (self->rxskb) {
+ dev_kfree_skb(self->rxskb);
+ self->rxskb = NULL;
+ }
+
+ /* Stop IrLAP */
+ if (self->irlap) {
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+ }
+
+ netif_stop_queue(dev);
+ self->open = 0;
+
+ return 0;
+}
+
+static int bfin_sir_init_iobuf(iobuff_t *io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL);
+ if (!io->head)
+ return -ENOMEM;
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+ return 0;
+}
+
+static const struct net_device_ops bfin_sir_ndo = {
+ .ndo_open = bfin_sir_open,
+ .ndo_stop = bfin_sir_stop,
+ .ndo_start_xmit = bfin_sir_hard_xmit,
+ .ndo_do_ioctl = bfin_sir_ioctl,
+ .ndo_get_stats = bfin_sir_stats,
+};
+
+static int bfin_sir_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct bfin_sir_self *self;
+ unsigned int baudrate_mask;
+ struct bfin_sir_port *sir_port;
+ int err;
+
+	if (pdev->id >= 0 && pdev->id < ARRAY_SIZE(per) &&
+ per[pdev->id][3] == pdev->id) {
+ err = peripheral_request_list(per[pdev->id], DRIVER_NAME);
+ if (err)
+ return err;
+ } else {
+ dev_err(&pdev->dev, "Invalid pdev id, please check board file\n");
+ return -ENODEV;
+ }
+
+ err = -ENOMEM;
+ sir_port = kmalloc(sizeof(*sir_port), GFP_KERNEL);
+ if (!sir_port)
+ goto err_mem_0;
+
+ bfin_sir_init_ports(sir_port, pdev);
+
+ dev = alloc_irdadev(sizeof(*self));
+ if (!dev)
+ goto err_mem_1;
+
+ self = netdev_priv(dev);
+ self->dev = &pdev->dev;
+ self->sir_port = sir_port;
+ sir_port->dev = dev;
+
+ err = bfin_sir_init_iobuf(&self->rx_buff, IRDA_SKB_MAX_MTU);
+ if (err)
+ goto err_mem_2;
+ err = bfin_sir_init_iobuf(&self->tx_buff, IRDA_SIR_MAX_FRAME);
+ if (err)
+ goto err_mem_3;
+
+ dev->netdev_ops = &bfin_sir_ndo;
+ dev->irq = sir_port->irq;
+
+ irda_init_max_qos_capabilies(&self->qos);
+
+ baudrate_mask = IR_9600;
+
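+	/* the cases below fall through on purpose, so that every rate up to
+	 * and including max_rate is advertised
+	 */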
+ switch (max_rate) {
+ case 115200:
+ baudrate_mask |= IR_115200;
+ case 57600:
+ baudrate_mask |= IR_57600;
+ case 38400:
+ baudrate_mask |= IR_38400;
+ case 19200:
+ baudrate_mask |= IR_19200;
+ case 9600:
+ break;
+ default:
+ dev_warn(&pdev->dev, "Invalid maximum baud rate, using 9600\n");
+ }
+
+ self->qos.baud_rate.bits &= baudrate_mask;
+
+ self->qos.min_turn_time.bits = 1; /* 10 ms or more */
+
+ irda_qos_bits_to_value(&self->qos);
+
+ err = register_netdev(dev);
+
+ if (err) {
+ kfree(self->tx_buff.head);
+err_mem_3:
+ kfree(self->rx_buff.head);
+err_mem_2:
+ free_netdev(dev);
+err_mem_1:
+ kfree(sir_port);
+err_mem_0:
+ peripheral_free_list(per[pdev->id]);
+ } else
+ platform_set_drvdata(pdev, sir_port);
+
+ return err;
+}
+
+static int bfin_sir_remove(struct platform_device *pdev)
+{
+ struct bfin_sir_port *sir_port;
+ struct net_device *dev = NULL;
+ struct bfin_sir_self *self;
+
+ sir_port = platform_get_drvdata(pdev);
+ if (!sir_port)
+ return 0;
+ dev = sir_port->dev;
+ self = netdev_priv(dev);
+ unregister_netdev(dev);
+ kfree(self->tx_buff.head);
+ kfree(self->rx_buff.head);
+ free_netdev(dev);
+ kfree(sir_port);
+
+ return 0;
+}
+
+static struct platform_driver bfin_ir_driver = {
+ .probe = bfin_sir_probe,
+ .remove = bfin_sir_remove,
+ .suspend = bfin_sir_suspend,
+ .resume = bfin_sir_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(bfin_ir_driver);
+
+module_param(max_rate, int, 0);
+MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");
+
+MODULE_AUTHOR("Graf Yang <graf.yang@analog.com>");
+MODULE_DESCRIPTION("Blackfin IrDA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/irda/drivers/bfin_sir.h b/drivers/staging/irda/drivers/bfin_sir.h
new file mode 100644
index 000000000000..d47cf14bb4a5
--- /dev/null
+++ b/drivers/staging/irda/drivers/bfin_sir.h
@@ -0,0 +1,93 @@
+/*
+ * Blackfin Infra-red Driver
+ *
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ *
+ */
+
+#include <linux/serial.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+
+#include <asm/irq.h>
+#include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/portmux.h>
+#undef DRIVER_NAME
+
+#ifdef CONFIG_SIR_BFIN_DMA
+struct dma_rx_buf {
+ char *buf;
+ int head;
+ int tail;
+};
+#endif
+
+struct bfin_sir_port {
+ unsigned char __iomem *membase;
+ unsigned int irq;
+ unsigned int lsr;
+ unsigned long clk;
+ struct net_device *dev;
+#ifdef CONFIG_SIR_BFIN_DMA
+ int tx_done;
+ struct dma_rx_buf rx_dma_buf;
+ struct timer_list rx_dma_timer;
+ int rx_dma_nrows;
+#endif
+ unsigned int tx_dma_channel;
+ unsigned int rx_dma_channel;
+};
+
+struct bfin_sir_port_res {
+ unsigned long base_addr;
+ int irq;
+ unsigned int rx_dma_channel;
+ unsigned int tx_dma_channel;
+};
+
+struct bfin_sir_self {
+ struct bfin_sir_port *sir_port;
+ spinlock_t lock;
+ unsigned int open;
+ int speed;
+ int newspeed;
+
+ struct sk_buff *txskb;
+ struct sk_buff *rxskb;
+ struct net_device_stats stats;
+ struct device *dev;
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+
+ struct work_struct work;
+ int mtt;
+};
+
+#define DRIVER_NAME "bfin_sir"
+
+#include <asm/bfin_serial.h>
+
+static const unsigned short per[][4] = {
+ /* rx pin tx pin NULL uart_number */
+ {P_UART0_RX, P_UART0_TX, 0, 0},
+ {P_UART1_RX, P_UART1_TX, 0, 1},
+ {P_UART2_RX, P_UART2_TX, 0, 2},
+ {P_UART3_RX, P_UART3_TX, 0, 3},
+};
diff --git a/drivers/staging/irda/drivers/donauboe.c b/drivers/staging/irda/drivers/donauboe.c
new file mode 100644
index 000000000000..b337e6d23a88
--- /dev/null
+++ b/drivers/staging/irda/drivers/donauboe.c
@@ -0,0 +1,1732 @@
+/*****************************************************************
+ *
+ * Filename: donauboe.c
+ * Version: 2.17
+ * Description: Driver for the Toshiba OBOE (or type-O or 701)
+ * FIR Chipset, also supports the DONAUOBOE (type-DO
+ * or d01) FIR chipset which as far as I know is
+ * register compatible.
+ * Documentation: http://libxg.free.fr/irda/lib-irda.html
+ * Status: Experimental.
+ * Author: James McKenzie <james@fishsoup.dhs.org>
+ * Created at: Sat May 8 12:35:27 1999
+ * Modified: Paul Bristow <paul.bristow@technologist.com>
+ * Modified: Mon Nov 11 19:10:05 1999
+ * Modified: James McKenzie <james@fishsoup.dhs.org>
+ * Modified: Thu Mar 16 12:49:00 2000 (Substantial rewrite)
+ * Modified: Sat Apr 29 00:23:03 2000 (Added DONAUOBOE support)
+ * Modified: Wed May 24 23:45:02 2000 (Fixed chipio_t structure)
+ * Modified: 2.13 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.13 dim jan 07 21:57:39 2001 (tested with kernel 2.4 & irnet/ppp)
+ * Modified: 2.14 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.14 lun fev 05 17:55:59 2001 (adapted to patch-2.4.1-pre8-irda1)
+ * Modified: 2.15 Martin Lucina <mato@kotelna.sk>
+ * Modified: 2.15 Fri Jun 21 20:40:59 2002 (sync with 2.4.18, substantial fixes)
+ * Modified: 2.16 Martin Lucina <mato@kotelna.sk>
+ * Modified: 2.16 Sat Jun 22 18:54:29 2002 (fix freeregion, default to verbose)
+ * Modified: 2.17 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.17 jeu sep 12 08:50:20 2002 (save_flags();cli(); replaced by spinlocks)
+ * Modified: 2.18 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.18 ven jan 10 03:14:16 2003 Change probe default options
+ *
+ * Copyright (c) 1999 James McKenzie, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither James McKenzie nor Cambridge University admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ * Applicable Models : Libretto 100/110CT and many more.
+ * Toshiba refers to this chip as the type-O IR port,
+ * or the type-DO IR port.
+ *
+ ********************************************************************/
+
+/* Look at toshoboe.h (currently in include/net/irda) for details of */
+/* Where to get documentation on the chip */
+
+/* See below for a description of the logic in this driver */
+
+/* User serviceable parts */
+/* USE_PROBE Create the code which probes the chip and does a few tests */
+/* do_probe module parameter Enable this code */
+/* Probe code is very useful for understanding how the hardware works */
+/* Use it with various combinations of TT_LEN, RX_LEN */
+/* Strongly recommended, disable if the probe fails on your machine */
+/* and send me <james@fishsoup.dhs.org> the output of dmesg */
+#define USE_PROBE 1
+#undef USE_PROBE
+
+/* Trace Transmit ring, interrupts, Receive ring or not? */
+#define PROBE_VERBOSE 1
+
+/* Debug option, examine sent and received raw data */
+/* Irdadump is better, but does not see all packets. Enable it if you want. */
+#undef DUMP_PACKETS
+
+/* MIR mode has not been tested. Some behaviour is different */
+/* Seems to work against an Ericsson R520 for me. -Martin */
+#define USE_MIR
+
+/* Schedule back to back hardware transmits wherever possible, otherwise */
+/* we need an interrupt for every frame, unset if oboe works for a bit and */
+/* then hangs */
+#define OPTIMIZE_TX
+
+/* Set the number of slots in the rings */
+/* If you get rx/tx fifo overflows at high bitrates, you can try increasing */
+/* these */
+
+#define RING_SIZE (OBOE_RING_SIZE_RX8 | OBOE_RING_SIZE_TX8)
+#define TX_SLOTS 8
+#define RX_SLOTS 8
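+/* (TX_SLOTS and RX_SLOTS mirror the OBOE_RING_SIZE_* selection above) */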
+
+
+/* Less user serviceable parts below here */
+
+/* Test, Transmit and receive buffer sizes, adjust at your peril */
+/* remarks: nfs usually needs 1k blocks */
+/* remarks: in SIR mode, CRC is received, -> RX_LEN=TX_LEN+2 */
+/* remarks: test accepts large blocks. Standard is 0x80 */
+/* When TT_LEN > RX_LEN (SIR mode) data is stored in successive slots. */
+/* When 3 or more slots are needed for each test packet, */
+/* data received in the first slots is overwritten, even */
+/* if OBOE_CTL_RX_HW_OWNS is not set, without any error! */
+#define TT_LEN 0x80
+#define TX_LEN 0xc00
+#define RX_LEN 0xc04
+/* Real transmitted length (SIR mode) is about 14+(2%*TX_LEN) bytes */
+/* longer than the user-defined length (see async_wrap_skb) and is less than 4K */
+/* Real received length (max RX_LEN) differs from the user-defined */
+/* length only by the CRC (2 or 4 bytes) */
+#define BUF_SAFETY 0x7a
+#define RX_BUF_SZ (RX_LEN)
+#define TX_BUF_SZ (TX_LEN+BUF_SAFETY)
+
+
+/* Logic of the netdev part of this driver */
+
+/* The RX ring is filled with buffers, when a packet arrives */
+/* it is DMA'd into the buffer which is marked used and RxDone called */
+/* RxDone forms an skb (and checks the CRC if in SIR mode) and ships */
+/* the packet off upstairs */
+
+/* The transmitter on the oboe chip can work in one of two modes */
+/* for each ring->tx[] the transmitter can either */
+/* a) transmit the packet, leave the transmitter enabled and proceed to */
+/* the next ring */
+/* OR */
+/* b) transmit the packet, switch off the transmitter and issue TxDone */
+
+/* All packets are entered into the ring in mode b), if the ring was */
+/* empty the transmitter is started. */
+
+/* If OPTIMIZE_TX is defined then in TxDone if the ring contains */
+/* more than one packet, all but the last are set to mode a) [HOWEVER */
+/* the hardware may not notice this, this is why we start in mode b) ] */
+/* then restart the transmitter */
+
+/* If OPTIMIZE_TX is not defined then we just restart the transmitter */
+/* if the ring isn't empty */
+
+/* Speed changes are delayed until the TxRing is empty */
+/* mtt is handled by generating packets with bad CRCs, before the data */
+
+/* TODO: */
+/* check the mtt works ok */
+/* finish the watchdog */
+
+/* No user serviceable parts below here */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+
+#include <asm/io.h>
+
+#include <net/irda/wrapper.h>
+#include <net/irda/irda.h>
+//#include <net/irda/irmod.h>
+//#include <net/irda/irlap_frame.h>
+#include <net/irda/irda_device.h>
+#include <net/irda/crc.h>
+
+#include "donauboe.h"
+
+#define INB(port) inb_p(port)
+#define OUTB(val,port) outb_p(val,port)
+#define OUTBP(val,port) outb_p(val,port)
+
+#define PROMPT OUTB(OBOE_PROMPT_BIT,OBOE_PROMPT);
+
+#if PROBE_VERBOSE
+#define PROBE_DEBUG(args...) (printk (args))
+#else
+#define PROBE_DEBUG(args...) ;
+#endif
+
+/* Set the DMA to be byte at a time */
+#define CONFIG0H_DMA_OFF OBOE_CONFIG0H_RCVANY
+#define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF| OBOE_CONFIG0H_ENDMAC
+#define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX
+
+static const struct pci_device_id toshoboe_pci_tbl[] = {
+ { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, },
+ { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, toshoboe_pci_tbl);
+
+#define DRIVER_NAME "toshoboe"
+static char *driver_name = DRIVER_NAME;
+
+static int max_baud = 4000000;
+#ifdef USE_PROBE
+static bool do_probe = false;
+#endif
+
+
+/**********************************************************************/
+static int
+toshoboe_checkfcs (unsigned char *buf, int len)
+{
+ int i;
+ union
+ {
+ __u16 value;
+ __u8 bytes[2];
+ }
+ fcs;
+
+ fcs.value = INIT_FCS;
+
+ for (i = 0; i < len; ++i)
+ fcs.value = irda_fcs (fcs.value, *(buf++));
+
+ return fcs.value == GOOD_FCS;
+}
+
+/***********************************************************************/
+/* Generic chip handling code */
+#ifdef DUMP_PACKETS
+static unsigned char dump[50];
+static void
+_dumpbufs (unsigned char *data, int len, char tete)
+{
+int i,j;
+char head=tete;
+for (i=0;i<len;i+=16) {
+ for (j=0;j<16 && i+j<len;j++) { sprintf(&dump[3*j],"%02x.",data[i+j]); }
+ dump [3*j]=0;
+ pr_debug("%c%s\n", head, dump);
+ head='+';
+ }
+}
+#endif
+
+#ifdef USE_PROBE
+/* Dump the registers */
+static void
+toshoboe_dumpregs (struct toshoboe_cb *self)
+{
+ __u32 ringbase;
+
+ ringbase = INB (OBOE_RING_BASE0) << 10;
+ ringbase |= INB (OBOE_RING_BASE1) << 18;
+ ringbase |= INB (OBOE_RING_BASE2) << 26;
+
+ printk (KERN_ERR DRIVER_NAME ": Register dump:\n");
+ printk (KERN_ERR "Interrupts: Tx:%d Rx:%d TxUnder:%d RxOver:%d Sip:%d\n",
+ self->int_tx, self->int_rx, self->int_txunder, self->int_rxover,
+ self->int_sip);
+ printk (KERN_ERR "RX %02x TX %02x RingBase %08x\n",
+ INB (OBOE_RXSLOT), INB (OBOE_TXSLOT), ringbase);
+ printk (KERN_ERR "RING_SIZE %02x IER %02x ISR %02x\n",
+ INB (OBOE_RING_SIZE), INB (OBOE_IER), INB (OBOE_ISR));
+ printk (KERN_ERR "CONFIG1 %02x STATUS %02x\n",
+ INB (OBOE_CONFIG1), INB (OBOE_STATUS));
+ printk (KERN_ERR "CONFIG0 %02x%02x ENABLE %02x%02x\n",
+ INB (OBOE_CONFIG0H), INB (OBOE_CONFIG0L),
+ INB (OBOE_ENABLEH), INB (OBOE_ENABLEL));
+ printk (KERN_ERR "NEW_PCONFIG %02x%02x CURR_PCONFIG %02x%02x\n",
+ INB (OBOE_NEW_PCONFIGH), INB (OBOE_NEW_PCONFIGL),
+ INB (OBOE_CURR_PCONFIGH), INB (OBOE_CURR_PCONFIGL));
+ printk (KERN_ERR "MAXLEN %02x%02x RXCOUNT %02x%02x\n",
+ INB (OBOE_MAXLENH), INB (OBOE_MAXLENL),
+ INB (OBOE_RXCOUNTL), INB (OBOE_RXCOUNTH));
+
+ if (self->ring)
+ {
+ int i;
+ ringbase = virt_to_bus (self->ring);
+ printk (KERN_ERR "Ring at %08x:\n", ringbase);
+ printk (KERN_ERR "RX:");
+ for (i = 0; i < RX_SLOTS; ++i)
+ printk (" (%d,%02x)",self->ring->rx[i].len,self->ring->rx[i].control);
+ printk ("\n");
+ printk (KERN_ERR "TX:");
+ for (i = 0; i < RX_SLOTS; ++i)
+ printk (" (%d,%02x)",self->ring->tx[i].len,self->ring->tx[i].control);
+ printk ("\n");
+ }
+}
+#endif
+
+/*Don't let the chip look at memory */
+static void
+toshoboe_disablebm (struct toshoboe_cb *self)
+{
+ __u8 command;
+ pci_read_config_byte (self->pdev, PCI_COMMAND, &command);
+ command &= ~PCI_COMMAND_MASTER;
+ pci_write_config_byte (self->pdev, PCI_COMMAND, command);
+
+}
+
+/* Shutdown the chip and point the taskfile reg somewhere else */
+static void
+toshoboe_stopchip (struct toshoboe_cb *self)
+{
+ /*Disable interrupts */
+ OUTB (0x0, OBOE_IER);
+ /*Disable DMA, Disable Rx, Disable Tx */
+ OUTB (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
+ /*Disable SIR MIR FIR, Tx and Rx */
+ OUTB (0x00, OBOE_ENABLEH);
+ /*Point the ring somewhere safe */
+ OUTB (0x3f, OBOE_RING_BASE2);
+ OUTB (0xff, OBOE_RING_BASE1);
+ OUTB (0xff, OBOE_RING_BASE0);
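+  /* i.e. 0xfffffc00, right at the top of the 32-bit address space */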
+
+ OUTB (RX_LEN >> 8, OBOE_MAXLENH);
+ OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
+
+  /* Acknowledge any pending interrupts */
+ OUTB (0xff, OBOE_ISR);
+
+ /*Why */
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+
+ /*switch it off */
+ OUTB (OBOE_CONFIG1_OFF, OBOE_CONFIG1);
+
+ toshoboe_disablebm (self);
+}
+
+/* Transmitter initialization */
+static void
+toshoboe_start_DMA (struct toshoboe_cb *self, int opts)
+{
+ OUTB (0x0, OBOE_ENABLEH);
+ OUTB (CONFIG0H_DMA_ON | opts, OBOE_CONFIG0H);
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+ PROMPT;
+}
+
+/*Set the baud rate */
+static void
+toshoboe_setbaud (struct toshoboe_cb *self)
+{
+ __u16 pconfig = 0;
+ __u8 config0l = 0;
+
+ pr_debug("%s(%d/%d)\n", __func__, self->speed, self->io.speed);
+
+ switch (self->speed)
+ {
+ case 2400:
+ case 4800:
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+#ifdef USE_MIR
+ case 1152000:
+#endif
+ case 4000000:
+ break;
+ default:
+
+ printk (KERN_ERR DRIVER_NAME ": switch to unsupported baudrate %d\n",
+ self->speed);
+ return;
+ }
+
+ switch (self->speed)
+ {
+ /* For SIR the preamble is done by adding XBOFs */
+ /* to the packet */
+ /* set to filtered SIR mode, filter looks for BOF and EOF */
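+      /* for these rates the BAUD field works out to (115200 / speed) - 1 */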
+ case 2400:
+ pconfig |= 47 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 4800:
+ pconfig |= 23 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 9600:
+ pconfig |= 11 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 19200:
+ pconfig |= 5 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 38400:
+ pconfig |= 2 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 57600:
+ pconfig |= 1 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ case 115200:
+ pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
+ break;
+ default:
+ /*Set to packet based reception */
+ OUTB (RX_LEN >> 8, OBOE_MAXLENH);
+ OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
+ break;
+ }
+
+ switch (self->speed)
+ {
+ case 2400:
+ case 4800:
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+ config0l = OBOE_CONFIG0L_ENSIR;
+ if (self->async)
+ {
+ /*Set to character based reception */
+ /*System will lock if MAXLEN=0 */
+ /*so have to be careful */
+ OUTB (0x01, OBOE_MAXLENH);
+ OUTB (0x01, OBOE_MAXLENL);
+ OUTB (0x00, OBOE_MAXLENH);
+ }
+ else
+ {
+ /*Set to packet based reception */
+ config0l |= OBOE_CONFIG0L_ENSIRF;
+ OUTB (RX_LEN >> 8, OBOE_MAXLENH);
+ OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
+ }
+ break;
+
+#ifdef USE_MIR
+ /* MIR mode */
+ /* Set for 16 bit CRC and enable MIR */
+ /* Preamble now handled by the chip */
+ case 1152000:
+ pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
+ pconfig |= 8 << OBOE_PCONFIG_WIDTHSHIFT;
+ pconfig |= 1 << OBOE_PCONFIG_PREAMBLESHIFT;
+ config0l = OBOE_CONFIG0L_CRC16 | OBOE_CONFIG0L_ENMIR;
+ break;
+#endif
+ /* FIR mode */
+ /* Set for 32 bit CRC and enable FIR */
+ /* Preamble handled by the chip */
+ case 4000000:
+ pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
+      /* Documentation says 14, but Toshiba uses 15 in their drivers */
+ pconfig |= 15 << OBOE_PCONFIG_PREAMBLESHIFT;
+ config0l = OBOE_CONFIG0L_ENFIR;
+ break;
+ }
+
+ /* Copy into new PHY config buffer */
+ OUTBP (pconfig >> 8, OBOE_NEW_PCONFIGH);
+ OUTB (pconfig & 0xff, OBOE_NEW_PCONFIGL);
+ OUTB (config0l, OBOE_CONFIG0L);
+
+ /* Now make OBOE copy from new PHY to current PHY */
+ OUTB (0x0, OBOE_ENABLEH);
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+ PROMPT;
+
+ /* speed change executed */
+ self->new_speed = 0;
+ self->io.speed = self->speed;
+}
+
+/*Let the chip look at memory */
+static void
+toshoboe_enablebm (struct toshoboe_cb *self)
+{
+ pci_set_master (self->pdev);
+}
+
+/*setup the ring */
+static void
+toshoboe_initring (struct toshoboe_cb *self)
+{
+ int i;
+
+ for (i = 0; i < TX_SLOTS; ++i)
+ {
+ self->ring->tx[i].len = 0;
+ self->ring->tx[i].control = 0x00;
+ self->ring->tx[i].address = virt_to_bus (self->tx_bufs[i]);
+ }
+
+ for (i = 0; i < RX_SLOTS; ++i)
+ {
+ self->ring->rx[i].len = RX_LEN;
+ self->ring->rx[i].len = 0;
+ self->ring->rx[i].address = virt_to_bus (self->rx_bufs[i]);
+ self->ring->rx[i].control = OBOE_CTL_RX_HW_OWNS;
+ }
+}
+
+static void
+toshoboe_resetptrs (struct toshoboe_cb *self)
+{
+  /* Can reset pointers by twiddling DMA */
+ OUTB (0x0, OBOE_ENABLEH);
+ OUTBP (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+
+ self->rxs = inb_p (OBOE_RXSLOT) & OBOE_SLOT_MASK;
+ self->txs = inb_p (OBOE_TXSLOT) & OBOE_SLOT_MASK;
+}
+
+/* Called in locked state */
+static void
+toshoboe_initptrs (struct toshoboe_cb *self)
+{
+
+ /* spin_lock_irqsave(self->spinlock, flags); */
+ /* save_flags (flags); */
+
+  /* Can reset pointers by twiddling DMA */
+ toshoboe_resetptrs (self);
+
+ OUTB (0x0, OBOE_ENABLEH);
+ OUTB (CONFIG0H_DMA_ON, OBOE_CONFIG0H);
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+
+ self->txpending = 0;
+
+ /* spin_unlock_irqrestore(self->spinlock, flags); */
+ /* restore_flags (flags); */
+}
+
+/* Wake the chip up and get it looking at the rings */
+/* Called in locked state */
+static void
+toshoboe_startchip (struct toshoboe_cb *self)
+{
+ __u32 physaddr;
+
+ toshoboe_initring (self);
+ toshoboe_enablebm (self);
+ OUTBP (OBOE_CONFIG1_RESET, OBOE_CONFIG1);
+ OUTBP (OBOE_CONFIG1_ON, OBOE_CONFIG1);
+
+ /* Stop the clocks */
+ OUTB (0, OBOE_ENABLEH);
+
+ /*Set size of rings */
+ OUTB (RING_SIZE, OBOE_RING_SIZE);
+
+  /* Acknowledge any pending interrupts */
+ OUTB (0xff, OBOE_ISR);
+
+ /*Enable ints */
+ OUTB (OBOE_INT_TXDONE | OBOE_INT_RXDONE |
+ OBOE_INT_TXUNDER | OBOE_INT_RXOVER | OBOE_INT_SIP , OBOE_IER);
+
+  /* Acknowledge any pending interrupts */
+ OUTB (0xff, OBOE_ISR);
+
+  /* Set the maximum packet length to RX_LEN */
+ OUTB (RX_LEN >> 8, OBOE_MAXLENH);
+ OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
+
+ /*Shutdown DMA */
+ OUTB (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
+
+ /*Find out where the rings live */
+ physaddr = virt_to_bus (self->ring);
+
+ IRDA_ASSERT ((physaddr & 0x3ff) == 0,
+               printk (KERN_ERR DRIVER_NAME ": ring not correctly aligned\n");
+ return;);
+
+ OUTB ((physaddr >> 10) & 0xff, OBOE_RING_BASE0);
+ OUTB ((physaddr >> 18) & 0xff, OBOE_RING_BASE1);
+ OUTB ((physaddr >> 26) & 0x3f, OBOE_RING_BASE2);
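+  /* i.e. the 1KB-aligned ring address goes in a byte at a time:
+     bits 17:10, 25:18 and 31:26 */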
+
+ /*Enable DMA controller in byte mode and RX */
+ OUTB (CONFIG0H_DMA_ON, OBOE_CONFIG0H);
+
+ /* Start up the clocks */
+ OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
+
+ /*set to sensible speed */
+ self->speed = 9600;
+ toshoboe_setbaud (self);
+ toshoboe_initptrs (self);
+}
+
+static void
+toshoboe_isntstuck (struct toshoboe_cb *self)
+{
+}
+
+static void
+toshoboe_checkstuck (struct toshoboe_cb *self)
+{
+ unsigned long flags;
+
+ if (0)
+ {
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ /* This will reset the chip completely */
+ printk (KERN_ERR DRIVER_NAME ": Resetting chip\n");
+
+ toshoboe_stopchip (self);
+ toshoboe_startchip (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ }
+}
+
+/*Generate packet of about mtt us long */
+static int
+toshoboe_makemttpacket (struct toshoboe_cb *self, void *buf, int mtt)
+{
+ int xbofs;
+
+ xbofs = ((int) (mtt/100)) * (int) (self->speed);
+  xbofs = xbofs / 80000;	/* Eight bits per byte, and mtt is in us */
+ xbofs++;
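+  /* e.g. mtt = 10000us at 9600 baud: (10000/100) * 9600 / 80000 = 12,
+     plus one -> 13 XBOF bytes */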
+
+ pr_debug(DRIVER_NAME ": generated mtt of %d bytes for %d us at %d baud\n",
+ xbofs, mtt, self->speed);
+
+ if (xbofs > TX_LEN)
+ {
+ printk (KERN_ERR DRIVER_NAME ": wanted %d bytes MTT but TX_LEN is %d\n",
+ xbofs, TX_LEN);
+ xbofs = TX_LEN;
+ }
+
+  /* xbofs will do for SIR, MIR and FIR; SIR mode doesn't generate a checksum anyway */
+ memset (buf, XBOF, xbofs);
+
+ return xbofs;
+}
+
+#ifdef USE_PROBE
+/***********************************************************************/
+/* Probe code */
+
+static void
+toshoboe_dumptx (struct toshoboe_cb *self)
+{
+ int i;
+ PROBE_DEBUG(KERN_WARNING "TX:");
+ for (i = 0; i < RX_SLOTS; ++i)
+ PROBE_DEBUG(" (%d,%02x)",self->ring->tx[i].len,self->ring->tx[i].control);
+ PROBE_DEBUG(" [%d]\n",self->speed);
+}
+
+static void
+toshoboe_dumprx (struct toshoboe_cb *self, int score)
+{
+ int i;
+ PROBE_DEBUG(" %d\nRX:",score);
+ for (i = 0; i < RX_SLOTS; ++i)
+ PROBE_DEBUG(" (%d,%02x)",self->ring->rx[i].len,self->ring->rx[i].control);
+ PROBE_DEBUG("\n");
+}
+
+static inline int
+stuff_byte (__u8 byte, __u8 * buf)
+{
+ switch (byte)
+ {
+ case BOF: /* FALLTHROUGH */
+ case EOF: /* FALLTHROUGH */
+ case CE:
+ /* Insert transparently coded */
+ buf[0] = CE; /* Send link escape */
+ buf[1] = byte ^ IRDA_TRANS; /* Complement bit 5 */
+ return 2;
+ /* break; */
+ default:
+ /* Non-special value, no transparency required */
+ buf[0] = byte;
+ return 1;
+ /* break; */
+ }
+}
+
+static irqreturn_t
+toshoboe_probeinterrupt (int irq, void *dev_id)
+{
+ struct toshoboe_cb *self = dev_id;
+ __u8 irqstat;
+
+ irqstat = INB (OBOE_ISR);
+
+/* was it us */
+ if (!(irqstat & OBOE_INT_MASK))
+ return IRQ_NONE;
+
+/* Ack all the interrupts */
+ OUTB (irqstat, OBOE_ISR);
+
+ if (irqstat & OBOE_INT_TXDONE)
+ {
+ int txp;
+
+ self->int_tx++;
+ PROBE_DEBUG("T");
+
+ txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
+ if (self->ring->tx[txp].control & OBOE_CTL_TX_HW_OWNS)
+ {
+ self->int_tx+=100;
+ PROBE_DEBUG("S");
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
+ }
+ }
+
+ if (irqstat & OBOE_INT_RXDONE) {
+ self->int_rx++;
+ PROBE_DEBUG("R"); }
+ if (irqstat & OBOE_INT_TXUNDER) {
+ self->int_txunder++;
+ PROBE_DEBUG("U"); }
+ if (irqstat & OBOE_INT_RXOVER) {
+ self->int_rxover++;
+ PROBE_DEBUG("O"); }
+ if (irqstat & OBOE_INT_SIP) {
+ self->int_sip++;
+ PROBE_DEBUG("I"); }
+ return IRQ_HANDLED;
+}
+
+static int
+toshoboe_maketestpacket (unsigned char *buf, int badcrc, int fir)
+{
+ int i;
+ int len = 0;
+ union
+ {
+ __u16 value;
+ __u8 bytes[2];
+ }
+ fcs;
+
+ if (fir)
+ {
+ memset (buf, 0, TT_LEN);
+ return TT_LEN;
+ }
+
+ fcs.value = INIT_FCS;
+
+ memset (buf, XBOF, 10);
+ len += 10;
+ buf[len++] = BOF;
+
+ for (i = 0; i < TT_LEN; ++i)
+ {
+ len += stuff_byte (i, buf + len);
+ fcs.value = irda_fcs (fcs.value, i);
+ }
+
+ len += stuff_byte (fcs.bytes[0] ^ badcrc, buf + len);
+ len += stuff_byte (fcs.bytes[1] ^ badcrc, buf + len);
+ buf[len++] = EOF;
+ len++;
+ return len;
+}
+
+static int
+toshoboe_probefail (struct toshoboe_cb *self, char *msg)
+{
+  printk (KERN_ERR DRIVER_NAME ": probe(%d) failed %s\n", self->speed, msg);
+ toshoboe_dumpregs (self);
+ toshoboe_stopchip (self);
+ free_irq (self->io.irq, (void *) self);
+ return 0;
+}
+
+static int
+toshoboe_numvalidrcvs (struct toshoboe_cb *self)
+{
+ int i, ret = 0;
+ for (i = 0; i < RX_SLOTS; ++i)
+ if ((self->ring->rx[i].control & 0xe0) == 0)
+ ret++;
+
+ return ret;
+}
+
+static int
+toshoboe_numrcvs (struct toshoboe_cb *self)
+{
+ int i, ret = 0;
+ for (i = 0; i < RX_SLOTS; ++i)
+ if (!(self->ring->rx[i].control & OBOE_CTL_RX_HW_OWNS))
+ ret++;
+
+ return ret;
+}
+
+static int
+toshoboe_probe (struct toshoboe_cb *self)
+{
+ int i, j, n;
+#ifdef USE_MIR
+ static const int bauds[] = { 9600, 115200, 4000000, 1152000 };
+#else
+ static const int bauds[] = { 9600, 115200, 4000000 };
+#endif
+ unsigned long flags;
+
+ if (request_irq (self->io.irq, toshoboe_probeinterrupt,
+ self->io.irqflags, "toshoboe", (void *) self))
+ {
+ printk (KERN_ERR DRIVER_NAME ": probe failed to allocate irq %d\n",
+ self->io.irq);
+ return 0;
+ }
+
+ /* test 1: SIR filter and back to back */
+
+ for (j = 0; j < ARRAY_SIZE(bauds); ++j)
+ {
+ int fir = (j > 1);
+ toshoboe_stopchip (self);
+
+
+ spin_lock_irqsave(&self->spinlock, flags);
+ /*Address is already setup */
+ toshoboe_startchip (self);
+ self->int_rx = self->int_tx = 0;
+ self->speed = bauds[j];
+ toshoboe_setbaud (self);
+ toshoboe_initptrs (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ self->ring->tx[self->txs].control =
+/* (FIR only) OBOE_CTL_TX_SIP needed for switching to next slot */
+/* MIR: all received data is stored in one slot */
+ (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
+ : OBOE_CTL_TX_HW_OWNS ;
+ self->ring->tx[self->txs].len =
+ toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ self->ring->tx[self->txs].control =
+ (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_SIP
+ : OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX ;
+ self->ring->tx[self->txs].len =
+ toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ self->ring->tx[self->txs].control =
+ (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
+ : OBOE_CTL_TX_HW_OWNS ;
+ self->ring->tx[self->txs].len =
+ toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ self->ring->tx[self->txs].control =
+ (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
+ | OBOE_CTL_TX_SIP | OBOE_CTL_TX_BAD_CRC
+ : OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX ;
+ self->ring->tx[self->txs].len =
+ toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ toshoboe_dumptx (self);
+ /* Turn on TX and RX and loopback */
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
+
+ i = 0;
+ n = fir ? 1 : 4;
+ while (toshoboe_numvalidrcvs (self) != n)
+ {
+ if (i > 4800)
+ return toshoboe_probefail (self, "filter test");
+ udelay ((9600*(TT_LEN+16))/self->speed);
+ i++;
+ }
+
+ n = fir ? 203 : 102;
+ while ((toshoboe_numrcvs(self) != self->int_rx) || (self->int_tx != n))
+ {
+ if (i > 4800)
+ return toshoboe_probefail (self, "interrupt test");
+ udelay ((9600*(TT_LEN+16))/self->speed);
+ i++;
+ }
+ toshoboe_dumprx (self,i);
+
+ }
+
+ /* test 2: SIR in char at a time */
+
+ toshoboe_stopchip (self);
+ self->int_rx = self->int_tx = 0;
+
+ spin_lock_irqsave(&self->spinlock, flags);
+ toshoboe_startchip (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ self->async = 1;
+ self->speed = 115200;
+ toshoboe_setbaud (self);
+ self->ring->tx[self->txs].control =
+ OBOE_CTL_TX_RTCENTX | OBOE_CTL_TX_HW_OWNS;
+ self->ring->tx[self->txs].len = 4;
+
+ ((unsigned char *) self->tx_bufs[self->txs])[0] = 'f';
+ ((unsigned char *) self->tx_bufs[self->txs])[1] = 'i';
+ ((unsigned char *) self->tx_bufs[self->txs])[2] = 's';
+ ((unsigned char *) self->tx_bufs[self->txs])[3] = 'h';
+ toshoboe_dumptx (self);
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
+
+ i = 0;
+ while (toshoboe_numvalidrcvs (self) != 4)
+ {
+ if (i > 100)
+ return toshoboe_probefail (self, "Async test");
+ udelay (100);
+ i++;
+ }
+
+ while ((toshoboe_numrcvs (self) != self->int_rx) || (self->int_tx != 1))
+ {
+ if (i > 100)
+ return toshoboe_probefail (self, "Async interrupt test");
+ udelay (100);
+ i++;
+ }
+ toshoboe_dumprx (self,i);
+
+ self->async = 0;
+ self->speed = 9600;
+ toshoboe_setbaud (self);
+ toshoboe_stopchip (self);
+
+ free_irq (self->io.irq, (void *) self);
+
+ printk (KERN_WARNING DRIVER_NAME ": Self test passed ok\n");
+
+ return 1;
+}
+#endif
+
+/******************************************************************/
+/* Netdev style code */
+
+/* Transmit something */
+static netdev_tx_t
+toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
+{
+ struct toshoboe_cb *self;
+ __s32 speed;
+ int mtt, len, ctl;
+ unsigned long flags;
+ struct irda_skb_cb *cb = (struct irda_skb_cb *) skb->cb;
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT (self != NULL, return NETDEV_TX_OK; );
+
+ pr_debug("%s.tx:%x(%x)%x\n",
+ __func__, skb->len, self->txpending, INB(OBOE_ENABLEH));
+ if (!cb->magic) {
+ pr_debug("%s.Not IrLAP:%x\n", __func__, cb->magic);
+#ifdef DUMP_PACKETS
+ _dumpbufs(skb->data,skb->len,'>');
+#endif
+ }
+
+ /* change speed pending, wait for its execution */
+ if (self->new_speed)
+ return NETDEV_TX_BUSY;
+
+ /* device stopped (apm) wait for restart */
+ if (self->stopped)
+ return NETDEV_TX_BUSY;
+
+ toshoboe_checkstuck (self);
+
+ /* Check if we need to change the speed */
+ /* But not now. Wait after transmission if mtt not required */
+ speed=irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1))
+ {
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ if (self->txpending || skb->len)
+ {
+ self->new_speed = speed;
+ pr_debug("%s: Queued TxDone scheduled speed change %d\n" ,
+ __func__, speed);
+ /* if no data, that's all! */
+ if (!skb->len)
+ {
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ dev_kfree_skb (skb);
+ return NETDEV_TX_OK;
+ }
+ /* True packet, go on, but */
+ /* do not accept anything before change speed execution */
+ netif_stop_queue(dev);
+ /* ready to process TxDone interrupt */
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ }
+ else
+ {
+ /* idle and no data, change speed now */
+ self->speed = speed;
+ toshoboe_setbaud (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ dev_kfree_skb (skb);
+ return NETDEV_TX_OK;
+ }
+
+ }
+
+ if ((mtt = irda_get_mtt(skb)))
+ {
+ /* This is fair since the queue should be empty anyway */
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ if (self->txpending)
+ {
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+      /* If in SIR mode we need to generate a string of XBOFs */
+      /* In MIR and FIR we need to generate a string of data */
+      /* to which we will add a wrong checksum */
+
+ mtt = toshoboe_makemttpacket (self, self->tx_bufs[self->txs], mtt);
+ pr_debug("%s.mtt:%x(%x)%d\n", __func__, skb->len, mtt, self->txpending);
+ if (mtt)
+ {
+ self->ring->tx[self->txs].len = mtt & 0xfff;
+
+ ctl = OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX;
+ if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_FIRON)
+ {
+ ctl |= OBOE_CTL_TX_BAD_CRC | OBOE_CTL_TX_SIP ;
+ }
+#ifdef USE_MIR
+ else if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_MIRON)
+ {
+ ctl |= OBOE_CTL_TX_BAD_CRC;
+ }
+#endif
+ self->ring->tx[self->txs].control = ctl;
+
+ OUTB (0x0, OBOE_ENABLEH);
+ /* It is only a timer. Do not send mtt packet outside! */
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
+
+ self->txpending++;
+
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ }
+ else
+ {
+ printk(KERN_ERR DRIVER_NAME ": problem with mtt packet - ignored\n");
+ }
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ }
+
+#ifdef DUMP_PACKETS
+dumpbufs(skb->data,skb->len,'>');
+#endif
+
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ if (self->ring->tx[self->txs].control & OBOE_CTL_TX_HW_OWNS)
+ {
+ pr_debug("%s.ful:%x(%x)%x\n",
+ __func__, skb->len, self->ring->tx[self->txs].control,
+ self->txpending);
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_SIRON)
+ {
+ len = async_wrap_skb (skb, self->tx_bufs[self->txs], TX_BUF_SZ);
+ }
+ else
+ {
+ len = skb->len;
+ skb_copy_from_linear_data(skb, self->tx_bufs[self->txs], len);
+ }
+ self->ring->tx[self->txs].len = len & 0x0fff;
+
+  /*Sometimes the HW doesn't see us assert RTCENTX in the interrupt code, */
+  /*so to play it safe we guarantee that the last packet to be transmitted */
+  /*has RTCENTX set */
+
+ ctl = OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX;
+ if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_FIRON)
+ {
+ ctl |= OBOE_CTL_TX_SIP ;
+ }
+ self->ring->tx[self->txs].control = ctl;
+
+ /* If transmitter is idle start in one-shot mode */
+
+ if (!self->txpending)
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
+
+ self->txpending++;
+
+ self->txs++;
+ self->txs %= TX_SLOTS;
+
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ dev_kfree_skb (skb);
+
+ return NETDEV_TX_OK;
+}
+
+/*interrupt handler */
+static irqreturn_t
+toshoboe_interrupt (int irq, void *dev_id)
+{
+ struct toshoboe_cb *self = dev_id;
+ __u8 irqstat;
+ struct sk_buff *skb = NULL;
+
+ irqstat = INB (OBOE_ISR);
+
+/* was it us */
+ if (!(irqstat & OBOE_INT_MASK))
+ return IRQ_NONE;
+
+/* Ack all the interrupts */
+ OUTB (irqstat, OBOE_ISR);
+
+ toshoboe_isntstuck (self);
+
+/* Txdone */
+ if (irqstat & OBOE_INT_TXDONE)
+ {
+ int txp, txpc;
+ int i;
+
+ txp = self->txpending;
+ self->txpending = 0;
+
+ for (i = 0; i < TX_SLOTS; ++i)
+ {
+ if (self->ring->tx[i].control & OBOE_CTL_TX_HW_OWNS)
+ self->txpending++;
+ }
+ pr_debug("%s.txd(%x)%x/%x\n", __func__, irqstat, txp, self->txpending);
+
+ txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
+
+ /* Got anything queued ? start it together */
+ if (self->ring->tx[txp].control & OBOE_CTL_TX_HW_OWNS)
+ {
+ txpc = txp;
+#ifdef OPTIMIZE_TX
+ while (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS)
+ {
+ txp = txpc;
+ txpc++;
+ txpc %= TX_SLOTS;
+ self->netdev->stats.tx_packets++;
+ if (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS)
+ self->ring->tx[txp].control &= ~OBOE_CTL_TX_RTCENTX;
+ }
+ self->netdev->stats.tx_packets--;
+#else
+ self->netdev->stats.tx_packets++;
+#endif
+ toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
+ }
+
+ if ((!self->txpending) && (self->new_speed))
+ {
+ self->speed = self->new_speed;
+ pr_debug("%s: Executed TxDone scheduled speed change %d\n",
+ __func__, self->speed);
+ toshoboe_setbaud (self);
+ }
+
+ /* Tell network layer that we want more frames */
+ if (!self->new_speed)
+ netif_wake_queue(self->netdev);
+ }
+
+ if (irqstat & OBOE_INT_RXDONE)
+ {
+ while (!(self->ring->rx[self->rxs].control & OBOE_CTL_RX_HW_OWNS))
+ {
+ int len = self->ring->rx[self->rxs].len;
+ skb = NULL;
+ pr_debug("%s.rcv:%x(%x)\n", __func__
+ , len, self->ring->rx[self->rxs].control);
+
+#ifdef DUMP_PACKETS
+dumpbufs(self->rx_bufs[self->rxs],len,'<');
+#endif
+
+ if (self->ring->rx[self->rxs].control == 0)
+ {
+ __u8 enable = INB (OBOE_ENABLEH);
+
+ /* In SIR mode we need to check the CRC as this */
+ /* hasn't been done by the hardware */
+ if (enable & OBOE_ENABLEH_SIRON)
+ {
+ if (!toshoboe_checkfcs (self->rx_bufs[self->rxs], len))
+ len = 0;
+ /*Trim off the CRC */
+ if (len > 1)
+ len -= 2;
+ else
+ len = 0;
+ pr_debug("%s.SIR:%x(%x)\n", __func__, len, enable);
+ }
+
+#ifdef USE_MIR
+ else if (enable & OBOE_ENABLEH_MIRON)
+ {
+ if (len > 1)
+ len -= 2;
+ else
+ len = 0;
+ pr_debug("%s.MIR:%x(%x)\n", __func__, len, enable);
+ }
+#endif
+ else if (enable & OBOE_ENABLEH_FIRON)
+ {
+ if (len > 3)
+ len -= 4; /*FIXME: check this */
+ else
+ len = 0;
+ pr_debug("%s.FIR:%x(%x)\n", __func__, len, enable);
+ }
+ else
+ pr_debug("%s.?IR:%x(%x)\n", __func__, len, enable);
+
+ if (len)
+ {
+ skb = dev_alloc_skb (len + 1);
+ if (skb)
+ {
+ skb_reserve (skb, 1);
+
+ skb_put (skb, len);
+ skb_copy_to_linear_data(skb, self->rx_bufs[self->rxs],
+ len);
+ self->netdev->stats.rx_packets++;
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons (ETH_P_IRDA);
+ }
+ else
+ {
+ printk (KERN_INFO
+ "%s(), memory squeeze, dropping frame.\n",
+ __func__);
+ }
+ }
+ }
+ else
+ {
+ /* TODO: =========================================== */
+ /* if OBOE_CTL_RX_LENGTH, our buffers are too small */
+ /* (MIR or FIR) data is lost. */
+	      /* (SIR) data is split across several slots. */
+	      /* We would have to join all the received buffers */
+	      /* into one large buffer before checking the CRC. */
+ pr_debug("%s.err:%x(%x)\n", __func__
+ , len, self->ring->rx[self->rxs].control);
+ }
+
+ self->ring->rx[self->rxs].len = 0x0;
+ self->ring->rx[self->rxs].control = OBOE_CTL_RX_HW_OWNS;
+
+ self->rxs++;
+ self->rxs %= RX_SLOTS;
+
+ if (skb)
+ netif_rx (skb);
+
+ }
+ }
+
+ if (irqstat & OBOE_INT_TXUNDER)
+ {
+ printk (KERN_WARNING DRIVER_NAME ": tx fifo underflow\n");
+ }
+ if (irqstat & OBOE_INT_RXOVER)
+ {
+ printk (KERN_WARNING DRIVER_NAME ": rx fifo overflow\n");
+ }
+/* This must be useful for something... */
+ if (irqstat & OBOE_INT_SIP)
+ {
+ self->int_sip++;
+ pr_debug("%s.sip:%x(%x)%x\n",
+ __func__, self->int_sip, irqstat, self->txpending);
+ }
+ return IRQ_HANDLED;
+}
+
+
+static int
+toshoboe_net_open (struct net_device *dev)
+{
+ struct toshoboe_cb *self;
+ unsigned long flags;
+ int rc;
+
+ self = netdev_priv(dev);
+
+ if (self->async)
+ return -EBUSY;
+
+ if (self->stopped)
+ return 0;
+
+ rc = request_irq (self->io.irq, toshoboe_interrupt,
+ IRQF_SHARED, dev->name, self);
+ if (rc)
+ return rc;
+
+ spin_lock_irqsave(&self->spinlock, flags);
+ toshoboe_startchip (self);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ /* Ready to play! */
+ netif_start_queue(dev);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open (dev, &self->qos, driver_name);
+
+ self->irdad = 1;
+
+ return 0;
+}
+
+static int
+toshoboe_net_close (struct net_device *dev)
+{
+ struct toshoboe_cb *self;
+
+ IRDA_ASSERT (dev != NULL, return -1; );
+ self = netdev_priv(dev);
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close (self->irlap);
+ self->irlap = NULL;
+
+ self->irdad = 0;
+
+ free_irq (self->io.irq, (void *) self);
+
+ if (!self->stopped)
+ {
+ toshoboe_stopchip (self);
+ }
+
+ return 0;
+}
+
+/*
+ * Function toshoboe_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int
+toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct toshoboe_cb *self;
+ unsigned long flags;
+ int ret = 0;
+
+ IRDA_ASSERT (dev != NULL, return -1; );
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT (self != NULL, return -1; );
+
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+
+ /* Disable interrupts & save flags */
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ switch (cmd)
+ {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ /* This function will also be used by IrLAP to change the
+ * speed, so we still must allow for speed change within
+ * interrupt context.
+ */
+ pr_debug("%s(BANDWIDTH), %s, (%X/%ld\n",
+ __func__, dev->name, INB(OBOE_STATUS), irq->ifr_baudrate);
+ if (!in_interrupt () && !capable (CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ /* self->speed=irq->ifr_baudrate; */
+ /* toshoboe_setbaud(self); */
+ /* Just change speed once - inserted by Paul Bristow */
+ self->new_speed = irq->ifr_baudrate;
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ pr_debug("%s(MEDIABUSY), %s, (%X/%x)\n",
+ __func__, dev->name,
+ INB(OBOE_STATUS), capable(CAP_NET_ADMIN));
+ if (!capable (CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto out;
+ }
+ irda_device_set_media_busy (self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = (INB (OBOE_STATUS) & OBOE_STATUS_RXBUSY) ? 1 : 0;
+ pr_debug("%s(RECEIVING), %s, (%X/%x)\n",
+ __func__, dev->name, INB(OBOE_STATUS), irq->ifr_receiving);
+ break;
+ default:
+ pr_debug("%s(?), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+ ret = -EOPNOTSUPP;
+ }
+out:
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return ret;
+
+}
+
+MODULE_DESCRIPTION("Toshiba OBOE IrDA Device Driver");
+MODULE_AUTHOR("James McKenzie <james@fishsoup.dhs.org>");
+MODULE_LICENSE("GPL");
+
+module_param (max_baud, int, 0);
+MODULE_PARM_DESC(max_baud, "Maximum baud rate");
+
+#ifdef USE_PROBE
+module_param (do_probe, bool, 0);
+MODULE_PARM_DESC(do_probe, "Enable/disable chip probing and self-test");
+#endif
+
+static void
+toshoboe_close (struct pci_dev *pci_dev)
+{
+ int i;
+ struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
+
+ IRDA_ASSERT (self != NULL, return; );
+
+ if (!self->stopped)
+ {
+ toshoboe_stopchip (self);
+ }
+
+ release_region (self->io.fir_base, self->io.fir_ext);
+
+ for (i = 0; i < TX_SLOTS; ++i)
+ {
+ kfree (self->tx_bufs[i]);
+ self->tx_bufs[i] = NULL;
+ }
+
+ for (i = 0; i < RX_SLOTS; ++i)
+ {
+ kfree (self->rx_bufs[i]);
+ self->rx_bufs[i] = NULL;
+ }
+
+ unregister_netdev(self->netdev);
+
+ kfree (self->ringbuf);
+ self->ringbuf = NULL;
+ self->ring = NULL;
+
+ free_netdev(self->netdev);
+}
+
+static const struct net_device_ops toshoboe_netdev_ops = {
+ .ndo_open = toshoboe_net_open,
+ .ndo_stop = toshoboe_net_close,
+ .ndo_start_xmit = toshoboe_hard_xmit,
+ .ndo_do_ioctl = toshoboe_net_ioctl,
+};
+
+static int
+toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
+{
+ struct toshoboe_cb *self;
+ struct net_device *dev;
+ int i = 0;
+ int ok = 0;
+ int err;
+
+ if ((err=pci_enable_device(pci_dev)))
+ return err;
+
+ dev = alloc_irdadev(sizeof (struct toshoboe_cb));
+ if (dev == NULL)
+ {
+ printk (KERN_ERR DRIVER_NAME ": can't allocate memory for "
+ "IrDA control block\n");
+ return -ENOMEM;
+ }
+
+ self = netdev_priv(dev);
+ self->netdev = dev;
+ self->pdev = pci_dev;
+ self->base = pci_resource_start(pci_dev,0);
+
+ self->io.fir_base = self->base;
+ self->io.fir_ext = OBOE_IO_EXTENT;
+ self->io.irq = pci_dev->irq;
+ self->io.irqflags = IRQF_SHARED;
+
+ self->speed = self->io.speed = 9600;
+ self->async = 0;
+
+ /* Lock the port that we need */
+ if (NULL==request_region (self->io.fir_base, self->io.fir_ext, driver_name))
+ {
+ printk (KERN_ERR DRIVER_NAME ": can't get iobase of 0x%03x\n"
+ ,self->io.fir_base);
+ err = -EBUSY;
+ goto freeself;
+ }
+
+ spin_lock_init(&self->spinlock);
+
+ irda_init_max_qos_capabilies (&self->qos);
+ self->qos.baud_rate.bits = 0;
+
+ if (max_baud >= 2400)
+ self->qos.baud_rate.bits |= IR_2400;
+ /*if (max_baud>=4800) idev->qos.baud_rate.bits|=IR_4800; */
+ if (max_baud >= 9600)
+ self->qos.baud_rate.bits |= IR_9600;
+ if (max_baud >= 19200)
+ self->qos.baud_rate.bits |= IR_19200;
+ if (max_baud >= 115200)
+ self->qos.baud_rate.bits |= IR_115200;
+#ifdef USE_MIR
+ if (max_baud >= 1152000)
+ {
+ self->qos.baud_rate.bits |= IR_1152000;
+ }
+#endif
+ if (max_baud >= 4000000)
+ {
+ self->qos.baud_rate.bits |= (IR_4000000 << 8);
+ }
+
+ /*FIXME: work this out... */
+ self->qos.min_turn_time.bits = 0xff;
+
+ irda_qos_bits_to_value (&self->qos);
+
+ /* Allocate twice the size to guarantee alignment */
+ self->ringbuf = kmalloc(OBOE_RING_LEN << 1, GFP_KERNEL);
+ if (!self->ringbuf)
+ {
+ err = -ENOMEM;
+ goto freeregion;
+ }
+
+#if (BITS_PER_LONG == 64)
+#error broken on 64-bit: casts pointer to 32-bit, and then back to pointer.
+#endif
+
+ /*We need to align the taskfile on a taskfile size boundary */
+ {
+ unsigned long addr;
+
+ addr = (__u32) self->ringbuf;
+ addr &= ~(OBOE_RING_LEN - 1);
+ addr += OBOE_RING_LEN;
+ self->ring = (struct OboeRing *) addr;
+ }
+
+ memset (self->ring, 0, OBOE_RING_LEN);
+ self->io.mem_base = (__u32) self->ring;
+
+ ok = 1;
+ for (i = 0; i < TX_SLOTS; ++i)
+ {
+ self->tx_bufs[i] = kmalloc (TX_BUF_SZ, GFP_KERNEL);
+ if (!self->tx_bufs[i])
+ ok = 0;
+ }
+
+ for (i = 0; i < RX_SLOTS; ++i)
+ {
+ self->rx_bufs[i] = kmalloc (RX_BUF_SZ, GFP_KERNEL);
+ if (!self->rx_bufs[i])
+ ok = 0;
+ }
+
+ if (!ok)
+ {
+ err = -ENOMEM;
+ goto freebufs;
+ }
+
+
+#ifdef USE_PROBE
+ if (do_probe)
+ if (!toshoboe_probe (self))
+ {
+ err = -ENODEV;
+ goto freebufs;
+ }
+#endif
+
+ SET_NETDEV_DEV(dev, &pci_dev->dev);
+ dev->netdev_ops = &toshoboe_netdev_ops;
+
+ err = register_netdev(dev);
+ if (err)
+ {
+ printk (KERN_ERR DRIVER_NAME ": register_netdev() failed\n");
+ err = -ENOMEM;
+ goto freebufs;
+ }
+ printk (KERN_INFO "IrDA: Registered device %s\n", dev->name);
+
+ pci_set_drvdata(pci_dev,self);
+
+ printk (KERN_INFO DRIVER_NAME ": Using multiple tasks\n");
+
+ return 0;
+
+freebufs:
+ for (i = 0; i < TX_SLOTS; ++i)
+ kfree (self->tx_bufs[i]);
+ for (i = 0; i < RX_SLOTS; ++i)
+ kfree (self->rx_bufs[i]);
+ kfree(self->ringbuf);
+
+freeregion:
+ release_region (self->io.fir_base, self->io.fir_ext);
+
+freeself:
+ free_netdev(dev);
+
+ return err;
+}
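
The #error guard above flags that this open routine truncates a virtual pointer to 32 bits before handing it to the chip. Below is a hedged sketch of the usual 64-bit-safe alternative using the generic DMA API; the helper name and the ring_dma plumbing are illustrative only and not part of this patch.

#include <linux/dma-mapping.h>

/* hypothetical helper, not in the patch: *ring_dma (a bus address), rather
 * than a casted virtual address, would then be programmed into the chip */
static int toshoboe_alloc_ring(struct toshoboe_cb *self, struct pci_dev *pci_dev,
			       dma_addr_t *ring_dma)
{
	self->ring = dma_alloc_coherent(&pci_dev->dev, OBOE_RING_LEN,
					ring_dma, GFP_KERNEL);
	if (!self->ring)
		return -ENOMEM;
	memset(self->ring, 0, OBOE_RING_LEN);	/* keep the existing "zero the ring" semantics */
	return 0;
}

Because dma_alloc_coherent() returns memory aligned to at least a page for an allocation of this size, the OBOE_RING_LEN alignment the chip expects would be met without the double-size kmalloc() trick.
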
+
+static int
+toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
+{
+ struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
+ unsigned long flags;
+ int i = 10;
+
+ if (!self || self->stopped)
+ return 0;
+
+ if ((!self->irdad) && (!self->async))
+ return 0;
+
+/* Flush all packets */
+ while ((i--) && (self->txpending))
+ msleep(10);
+
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ toshoboe_stopchip (self);
+ self->stopped = 1;
+ self->txpending = 0;
+
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return 0;
+}
+
+static int
+toshoboe_wakeup (struct pci_dev *pci_dev)
+{
+ struct toshoboe_cb *self = pci_get_drvdata(pci_dev);
+ unsigned long flags;
+
+ if (!self || !self->stopped)
+ return 0;
+
+ if ((!self->irdad) && (!self->async))
+ return 0;
+
+ spin_lock_irqsave(&self->spinlock, flags);
+
+ toshoboe_startchip (self);
+ self->stopped = 0;
+
+ netif_wake_queue(self->netdev);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ return 0;
+}
+
+static struct pci_driver donauboe_pci_driver = {
+ .name = "donauboe",
+ .id_table = toshoboe_pci_tbl,
+ .probe = toshoboe_open,
+ .remove = toshoboe_close,
+ .suspend = toshoboe_gotosleep,
+ .resume = toshoboe_wakeup
+};
+
+module_pci_driver(donauboe_pci_driver);
diff --git a/drivers/staging/irda/drivers/donauboe.h b/drivers/staging/irda/drivers/donauboe.h
new file mode 100644
index 000000000000..d92d54e839b9
--- /dev/null
+++ b/drivers/staging/irda/drivers/donauboe.h
@@ -0,0 +1,362 @@
+/*********************************************************************
+ *
+ * Filename: toshoboe.h
+ * Version: 2.16
+ * Description: Driver for the Toshiba OBOE (or type-O or 701)
+ * FIR Chipset, also supports the DONAUOBOE (type-DO
+ * or d01) FIR chipset which as far as I know is
+ * register compatible.
+ * Status: Experimental.
+ * Author: James McKenzie <james@fishsoup.dhs.org>
+ * Created at: Sat May 8 12:35:27 1999
+ * Modified: 2.16 Martin Lucina <mato@kotelna.sk>
+ * Modified: 2.16 Sat Jun 22 18:54:29 2002 (sync headers)
+ * Modified: 2.17 Christian Gennerat <christian.gennerat@polytechnique.org>
+ * Modified: 2.17 jeu sep 12 08:50:20 2002 (add lock to be used by spinlocks)
+ *
+ * Copyright (c) 1999 James McKenzie, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither James McKenzie nor Cambridge University admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ * Applicable Models : Libretto 100/110CT and many more.
+ * Toshiba refers to this chip as the type-O IR port,
+ * or the type-DO IR port.
+ *
+ * IrDA chip set list from Toshiba Computer Engineering Corp.
+ * model method maker controller Version
+ * Portege 320CT FIR,SIR Toshiba Oboe(Triangle)
+ * Portege 3010CT FIR,SIR Toshiba Oboe(Sydney)
+ * Portege 3015CT FIR,SIR Toshiba Oboe(Sydney)
+ * Portege 3020CT FIR,SIR Toshiba Oboe(Sydney)
+ * Portege 7020CT FIR,SIR ? ?
+ *
+ * Satell. 4090XCDT FIR,SIR ? ?
+ *
+ * Libretto 100CT FIR,SIR Toshiba Oboe
+ * Libretto 1000CT FIR,SIR Toshiba Oboe
+ *
+ * TECRA750DVD FIR,SIR Toshiba Oboe(Triangle) REV ID=14h
+ * TECRA780 FIR,SIR Toshiba Oboe(Sandlot) REV ID=32h,33h
+ * TECRA750CDT FIR,SIR Toshiba Oboe(Triangle) REV ID=13h,14h
+ * TECRA8000 FIR,SIR Toshiba Oboe(ISKUR) REV ID=23h
+ *
+ ********************************************************************/
+
+/* The documentation for this chip is allegedly released */
+/* However I have not seen it, nor have I managed to contact */
+/* anyone who has. HOWEVER the chip bears a striking resemblance */
+/* to the IrDA controller in the Toshiba RISC TMPR3922 chip */
+/* the documentation for this is freely available at */
+/* http://www.madingley.org/james/resources/toshoboe/TMPR3922.pdf */
+/* The mapping between the registers in that document and the */
+/* Registers in the 701 oboe chip are as follows */
+
+
+/* 3922 reg 701 regs, by bit numbers */
+/* 7- 0 15- 8 24-16 31-25 */
+/* $28 0x0 0x1 */
+/* $2c SEE NOTE 1 */
+/* $30 0x6 0x7 */
+/* $34 0x8 0x9 SEE NOTE 2 */
+/* $38 0x10 0x11 */
+/* $3C 0xe SEE NOTE 3 */
+/* $40 0x12 0x13 */
+/* $44 0x14 0x15 */
+/* $48 0x16 0x17 */
+/* $4c 0x18 0x19 */
+/* $50 0x1a 0x1b */
+
+/* FIXME: could be 0x1b 0x1a here */
+
+/* $54 0x1d 0x1c */
+/* $5C 0xf SEE NOTE 4 */
+/* $130 SEE NOTE 5 */
+/* $134 SEE NOTE 6 */
+/* */
+/* NOTES: */
+/* 1. The pointer to the ring is packed in most unceremoniously */
+/* 701 Register Address bits (A9-A0 must be zero) */
+/* 0x4: A17 A16 A15 A14 A13 A12 A11 A10 */
+/* 0x5: A25 A24 A23 A22 A21 A20 A19 A18 */
+/* 0x2: 0 0 A31 A30 A29 A28 A27 A26 */
+/* */
+/* 2. The M$ drivers do a write 0x1 to 0x9, however the 3922 */
+/* documentation would suggest that a write of 0x1 to 0x8 */
+/* would be more appropriate. */
+/* */
+/* 3. This assignment is tenuous at best, register 0xe seems to */
+/* have bits arranged 0 0 0 R/W R/W R/W R/W R/W */
+/* if either of the lower two bits are set the chip seems to */
+/* switch off */
+/* */
+/* 4. Bits 7-4 seem to be different; bit 4 seems just to be a */
+/*    generic receiver busy flag */
+/* */
+/* 5. and 6. The IER and ISR have a different bit assignment */
+/* The lower three bits of both read back as ones */
+/* ISR is register 0xc, IER is register 0xd */
+/* 7 6 5 4 3 2 1 0 */
+/* 0xc: TxDone RxDone TxUndr RxOver SipRcv 1 1 1 */
+/* 0xd: TxDone RxDone TxUndr RxOver SipRcv 1 1 1 */
+/* TxDone xmit done (generated only if the generate interrupt bit */
+/* is set in the ring) */
+/* RxDone recv completed (or other recv condition if you set it */
+/* up) */
+/* TxUnder underflow in Transmit FIFO */
+/* RxOver overflow in Recv FIFO */
+/* SipRcv received serial gap (or other condition you set) */
+/* Interrupts are enabled by writing a one to the IER register */
+/* Interrupts are cleared by writing a one to the ISR register */
+/* */
+/* 6. The remaining registers: 0x6 and 0x3 appear to be */
+/*    reserved parts of 16 or 32 bit registers; the remainder */
+/* 0xa 0xb 0x1e 0x1f could possibly be (by their behaviour) */
+/* the Unicast Filter register at $58. */
+/* */
+/* 7. While the core obviously expects 32 bit accesses, all the */
+/*    M$ drivers do 8 bit accesses; in fact the Miniport ones */
+/*    write and read back the byte several times (why?) */
+
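
To make NOTE 1 concrete, here is a minimal sketch (not part of the patch) of how a 32-bit ring address, whose bits A9-A0 are already zero thanks to the OBOE_RING_LEN alignment, would be split across the three base registers defined further down; the helper name is hypothetical and OUTB is assumed to be the byte accessor donauboe.c uses.

/* illustrative only: split a ring address per NOTE 1 */
static void oboe_write_ring_base(struct toshoboe_cb *self, __u32 ring)
{
	OUTB((ring >> 10) & 0xff, OBOE_RING_BASE0);	/* A17..A10 into reg 0x4 */
	OUTB((ring >> 18) & 0xff, OBOE_RING_BASE1);	/* A25..A18 into reg 0x5 */
	OUTB((ring >> 26) & 0x3f, OBOE_RING_BASE2);	/* A31..A26 into reg 0x2 */
}
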
+
+#ifndef TOSHOBOE_H
+#define TOSHOBOE_H
+
+/* Registers */
+
+#define OBOE_IO_EXTENT 0x1f
+
+/*Receive and transmit slot pointers */
+#define OBOE_REG(i) (i+(self->base))
+#define OBOE_RXSLOT OBOE_REG(0x0)
+#define OBOE_TXSLOT OBOE_REG(0x1)
+#define OBOE_SLOT_MASK 0x3f
+
+#define OBOE_TXRING_OFFSET 0x200
+#define OBOE_TXRING_OFFSET_IN_SLOTS 0x40
+
+/*pointer to the ring */
+#define OBOE_RING_BASE0 OBOE_REG(0x4)
+#define OBOE_RING_BASE1 OBOE_REG(0x5)
+#define OBOE_RING_BASE2 OBOE_REG(0x2)
+#define OBOE_RING_BASE3 OBOE_REG(0x3)
+
+/*Number of slots in the ring */
+#define OBOE_RING_SIZE OBOE_REG(0x7)
+#define OBOE_RING_SIZE_RX4 0x00
+#define OBOE_RING_SIZE_RX8 0x01
+#define OBOE_RING_SIZE_RX16 0x03
+#define OBOE_RING_SIZE_RX32 0x07
+#define OBOE_RING_SIZE_RX64 0x0f
+#define OBOE_RING_SIZE_TX4 0x00
+#define OBOE_RING_SIZE_TX8 0x10
+#define OBOE_RING_SIZE_TX16 0x30
+#define OBOE_RING_SIZE_TX32 0x70
+#define OBOE_RING_SIZE_TX64 0xf0
+
+#define OBOE_RING_MAX_SIZE 64
+
+/*Causes the gubbins to re-examine the ring */
+#define OBOE_PROMPT OBOE_REG(0x9)
+#define OBOE_PROMPT_BIT 0x1
+
+/* Interrupt Status Register */
+#define OBOE_ISR OBOE_REG(0xc)
+/* Interrupt Enable Register */
+#define OBOE_IER OBOE_REG(0xd)
+/* Interrupt bits for IER and ISR */
+#define OBOE_INT_TXDONE 0x80
+#define OBOE_INT_RXDONE 0x40
+#define OBOE_INT_TXUNDER 0x20
+#define OBOE_INT_RXOVER 0x10
+#define OBOE_INT_SIP 0x08
+#define OBOE_INT_MASK 0xf8
+
+/*Reset Register */
+#define OBOE_CONFIG1 OBOE_REG(0xe)
+#define OBOE_CONFIG1_RST 0x01
+#define OBOE_CONFIG1_DISABLE 0x02
+#define OBOE_CONFIG1_4 0x08
+#define OBOE_CONFIG1_8 0x08
+
+#define OBOE_CONFIG1_ON 0x8
+#define OBOE_CONFIG1_RESET 0xf
+#define OBOE_CONFIG1_OFF 0xe
+
+#define OBOE_STATUS OBOE_REG(0xf)
+#define OBOE_STATUS_RXBUSY 0x10
+#define OBOE_STATUS_FIRRX 0x04
+#define OBOE_STATUS_MIRRX 0x02
+#define OBOE_STATUS_SIRRX 0x01
+
+
+/*Speed control registers */
+#define OBOE_CONFIG0L OBOE_REG(0x10)
+#define OBOE_CONFIG0H OBOE_REG(0x11)
+
+#define OBOE_CONFIG0H_TXONLOOP 0x80 /*Transmit when looping (dangerous) */
+#define OBOE_CONFIG0H_LOOP 0x40 /*Loopback Tx->Rx */
+#define OBOE_CONFIG0H_ENTX 0x10 /*Enable Tx */
+#define OBOE_CONFIG0H_ENRX 0x08 /*Enable Rx */
+#define OBOE_CONFIG0H_ENDMAC 0x04 /*Enable/reset* the DMA controller */
+#define OBOE_CONFIG0H_RCVANY 0x02 /*DMA mode 1=bytes, 0=dwords */
+
+#define OBOE_CONFIG0L_CRC16 0x80 /*CRC 1=16 bit 0=32 bit */
+#define OBOE_CONFIG0L_ENFIR 0x40 /*Enable FIR */
+#define OBOE_CONFIG0L_ENMIR 0x20 /*Enable MIR */
+#define OBOE_CONFIG0L_ENSIR 0x10 /*Enable SIR */
+#define OBOE_CONFIG0L_ENSIRF 0x08 /*Enable SIR framer */
+#define OBOE_CONFIG0L_SIRTEST 0x04 /*Enable SIR framer in MIR and FIR */
+#define OBOE_CONFIG0L_INVERTTX 0x02 /*Invert Tx Line */
+#define OBOE_CONFIG0L_INVERTRX 0x01 /*Invert Rx Line */
+
+#define OBOE_BOF OBOE_REG(0x12)
+#define OBOE_EOF OBOE_REG(0x13)
+
+#define OBOE_ENABLEL OBOE_REG(0x14)
+#define OBOE_ENABLEH OBOE_REG(0x15)
+
+#define OBOE_ENABLEH_PHYANDCLOCK 0x80 /*Toggle low to copy config in */
+#define OBOE_ENABLEH_CONFIGERR 0x40
+#define OBOE_ENABLEH_FIRON 0x20
+#define OBOE_ENABLEH_MIRON 0x10
+#define OBOE_ENABLEH_SIRON 0x08
+#define OBOE_ENABLEH_ENTX 0x04
+#define OBOE_ENABLEH_ENRX 0x02
+#define OBOE_ENABLEH_CRC16 0x01
+
+#define OBOE_ENABLEL_BROADCAST 0x01
+
+#define OBOE_CURR_PCONFIGL OBOE_REG(0x16) /*Current config */
+#define OBOE_CURR_PCONFIGH OBOE_REG(0x17)
+
+#define OBOE_NEW_PCONFIGL OBOE_REG(0x18)
+#define OBOE_NEW_PCONFIGH OBOE_REG(0x19)
+
+#define OBOE_PCONFIGH_BAUDMASK 0xfc
+#define OBOE_PCONFIGH_WIDTHMASK 0x04
+#define OBOE_PCONFIGL_WIDTHMASK 0xe0
+#define OBOE_PCONFIGL_PREAMBLEMASK 0x1f
+
+#define OBOE_PCONFIG_BAUDMASK 0xfc00
+#define OBOE_PCONFIG_BAUDSHIFT 10
+#define OBOE_PCONFIG_WIDTHMASK 0x04e0
+#define OBOE_PCONFIG_WIDTHSHIFT 5
+#define OBOE_PCONFIG_PREAMBLEMASK 0x001f
+#define OBOE_PCONFIG_PREAMBLESHIFT 0
+
+#define OBOE_MAXLENL OBOE_REG(0x1a)
+#define OBOE_MAXLENH OBOE_REG(0x1b)
+
+#define OBOE_RXCOUNTH OBOE_REG(0x1c) /*Reset on receipt */
+#define OBOE_RXCOUNTL OBOE_REG(0x1d) /*of whole packet */
+
+/* The PCI ID of the OBOE chip */
+#ifndef PCI_DEVICE_ID_FIR701
+#define PCI_DEVICE_ID_FIR701 0x0701
+#endif
+
+#ifndef PCI_DEVICE_ID_FIRD01
+#define PCI_DEVICE_ID_FIRD01 0x0d01
+#endif
+
+struct OboeSlot
+{
+  __u16 len; /*Twelve bits of packet length */
+ __u8 unused;
+ __u8 control; /*Slot control/status see below */
+ __u32 address; /*Slot buffer address */
+}
+__packed;
+
+#define OBOE_NTASKS OBOE_TXRING_OFFSET_IN_SLOTS
+
+struct OboeRing
+{
+ struct OboeSlot rx[OBOE_NTASKS];
+ struct OboeSlot tx[OBOE_NTASKS];
+};
+
+#define OBOE_RING_LEN (sizeof(struct OboeRing))
+
+
+#define OBOE_CTL_TX_HW_OWNS 0x80 /*W/R This slot owned by the hardware */
+#define OBOE_CTL_TX_DISTX_CRC 0x40 /*W Disable CRC generation for [FM]IR */
+#define OBOE_CTL_TX_BAD_CRC 0x20 /*W Generate bad CRC */
+#define OBOE_CTL_TX_SIP 0x10 /*W Generate an SIP after transmission */
+#define OBOE_CTL_TX_MKUNDER 0x08 /*W Generate an underrun error */
+#define OBOE_CTL_TX_RTCENTX 0x04 /*W Enable receiver and generate TXdone */
+ /* After this slot is processed */
+#define OBOE_CTL_TX_UNDER 0x01 /*R Set by hardware to indicate underrun */
+
+
+#define OBOE_CTL_RX_HW_OWNS 0x80 /*W/R This slot owned by hardware */
+#define OBOE_CTL_RX_PHYERR 0x40 /*R Decoder error on reception */
+#define OBOE_CTL_RX_CRCERR 0x20 /*R CRC error only set for [FM]IR */
+#define OBOE_CTL_RX_LENGTH 0x10 /*R Packet > max Rx length */
+#define OBOE_CTL_RX_OVER 0x08 /*R set to indicate an overflow */
+#define OBOE_CTL_RX_SIRBAD 0x04 /*R SIR had BOF in packet or ABORT sequence */
+#define OBOE_CTL_RX_RXEOF 0x02 /*R Finished receiving on this slot */
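
As a hedged illustration of the ownership handshake these control bits describe, mirroring what donauboe.c does in its transmit path: the hypothetical helper below assumes the slot's buffer address was programmed at chip start-up and that the chip is prompted through OBOE_PROMPT, which is an assumption rather than something visible in this hunk.

/* illustrative only: hand one tx descriptor over to the hardware */
static void oboe_queue_tx(struct toshoboe_cb *self, int slot, int frame_len)
{
	/* .address is assumed to have been set up when the chip was started */
	self->ring->tx[slot].len = frame_len & 0x0fff;	/* length field is 12 bits wide */
	self->ring->tx[slot].control = OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX;
	OUTB(OBOE_PROMPT_BIT, OBOE_PROMPT);	/* assumption: ask the chip to re-examine the ring */
}

The driver then leaves the slot alone until the interrupt handler sees OBOE_CTL_TX_HW_OWNS cleared again.
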
+
+
+struct toshoboe_cb
+{
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+ struct tty_driver ttydev;
+
+  struct irlap_cb *irlap; /* The link layer we are bound to */
+
+ chipio_t io; /* IrDA controller information */
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ __u32 flags; /* Interface flags */
+
+ struct pci_dev *pdev; /*PCI device */
+ int base; /*IO base */
+
+
+ int txpending; /*how many tx's are pending */
+ int txs, rxs; /*Which slots are we at */
+
+ int irdad; /*Driver under control of netdev end */
+ int async; /*Driver under control of async end */
+
+
+ int stopped; /*Stopped by some or other APM stuff */
+
+ int filter; /*In SIR mode do we want to receive
+ frames or byte ranges */
+
+ void *ringbuf; /*The ring buffer */
+ struct OboeRing *ring; /*The ring */
+
+ void *tx_bufs[OBOE_RING_MAX_SIZE]; /*The buffers */
+ void *rx_bufs[OBOE_RING_MAX_SIZE];
+
+
+ int speed; /*Current setting of the speed */
+ int new_speed; /*Set to request a speed change */
+
+/* The spinlock protects critical parts of the driver.
+ * Locking is done like this :
+ * spin_lock_irqsave(&self->spinlock, flags);
+ * Releasing the lock :
+ * spin_unlock_irqrestore(&self->spinlock, flags);
+ */
+ spinlock_t spinlock;
+ /* Used for the probe and diagnostics code */
+ int int_rx;
+ int int_tx;
+ int int_txunder;
+ int int_rxover;
+ int int_sip;
+};
+
+
+#endif
diff --git a/drivers/staging/irda/drivers/esi-sir.c b/drivers/staging/irda/drivers/esi-sir.c
new file mode 100644
index 000000000000..019a3e848bcb
--- /dev/null
+++ b/drivers/staging/irda/drivers/esi-sir.c
@@ -0,0 +1,157 @@
+/*********************************************************************
+ *
+ * Filename: esi.c
+ * Version: 1.6
+ * Description: Driver for the Extended Systems JetEye PC dongle
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Feb 21 18:54:38 1998
+ * Modified at: Sun Oct 27 22:01:04 2002
+ * Modified by: Martin Diehl <mad@mdiehl.de>
+ *
+ * Copyright (c) 1999 Dag Brattli, <dagb@cs.uit.no>,
+ * Copyright (c) 1998 Thomas Davis, <ratbert@radiks.net>,
+ * Copyright (c) 2002 Martin Diehl, <mad@mdiehl.de>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int esi_open(struct sir_dev *);
+static int esi_close(struct sir_dev *);
+static int esi_change_speed(struct sir_dev *, unsigned);
+static int esi_reset(struct sir_dev *);
+
+static struct dongle_driver esi = {
+ .owner = THIS_MODULE,
+ .driver_name = "JetEye PC ESI-9680 PC",
+ .type = IRDA_ESI_DONGLE,
+ .open = esi_open,
+ .close = esi_close,
+ .reset = esi_reset,
+ .set_speed = esi_change_speed,
+};
+
+static int __init esi_sir_init(void)
+{
+ return irda_register_dongle(&esi);
+}
+
+static void __exit esi_sir_cleanup(void)
+{
+ irda_unregister_dongle(&esi);
+}
+
+static int esi_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ /* Power up and set dongle to 9600 baud */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_115200;
+ qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int esi_close(struct sir_dev *dev)
+{
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function esi_change_speed (task)
+ *
+ * Set the speed for the Extended Systems JetEye PC ESI-9680 type dongle
+ * Apparently (see old esi-driver) no delays are needed here...
+ *
+ */
+static int esi_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ int ret = 0;
+ int dtr, rts;
+
+ switch (speed) {
+ case 19200:
+ dtr = TRUE;
+ rts = FALSE;
+ break;
+ case 115200:
+ dtr = rts = TRUE;
+ break;
+ default:
+ ret = -EINVAL;
+ speed = 9600;
+ /* fall through */
+ case 9600:
+ dtr = FALSE;
+ rts = TRUE;
+ break;
+ }
+
+ /* Change speed of dongle */
+ sirdev_set_dtr_rts(dev, dtr, rts);
+ dev->speed = speed;
+
+ return ret;
+}
+
+/*
+ * Function esi_reset (task)
+ *
+ * Reset dongle;
+ *
+ */
+static int esi_reset(struct sir_dev *dev)
+{
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ /* Hm, the old esi-driver left the dongle unpowered relying on
+ * the following speed change to repower. This might work for
+ * the esi because we only need the modem lines. However, now the
+ * general rule is reset must bring the dongle to some working
+ * well-known state because speed change might write to registers.
+	 * The old esi-driver didn't add any delay here - let's hope it's fine.
+ */
+
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+ dev->speed = 9600;
+
+ return 0;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Extended Systems JetEye PC dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-1"); /* IRDA_ESI_DONGLE */
+
+module_init(esi_sir_init);
+module_exit(esi_sir_cleanup);
+
diff --git a/drivers/staging/irda/drivers/girbil-sir.c b/drivers/staging/irda/drivers/girbil-sir.c
new file mode 100644
index 000000000000..7e0a5b8c6d53
--- /dev/null
+++ b/drivers/staging/irda/drivers/girbil-sir.c
@@ -0,0 +1,252 @@
+/*********************************************************************
+ *
+ * Filename: girbil.c
+ * Version: 1.2
+ * Description: Implementation for the Greenwich GIrBIL dongle
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Feb 6 21:02:33 1999
+ * Modified at: Fri Dec 17 09:13:20 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int girbil_reset(struct sir_dev *dev);
+static int girbil_open(struct sir_dev *dev);
+static int girbil_close(struct sir_dev *dev);
+static int girbil_change_speed(struct sir_dev *dev, unsigned speed);
+
+/* Control register 1 */
+#define GIRBIL_TXEN 0x01 /* Enable transmitter */
+#define GIRBIL_RXEN 0x02 /* Enable receiver */
+#define GIRBIL_ECAN 0x04 /* Cancel self emitted data */
+#define GIRBIL_ECHO 0x08 /* Echo control characters */
+
+/* LED Current Register (0x2) */
+#define GIRBIL_HIGH 0x20
+#define GIRBIL_MEDIUM 0x21
+#define GIRBIL_LOW 0x22
+
+/* Baud register (0x3) */
+#define GIRBIL_2400 0x30
+#define GIRBIL_4800 0x31
+#define GIRBIL_9600 0x32
+#define GIRBIL_19200 0x33
+#define GIRBIL_38400 0x34
+#define GIRBIL_57600 0x35
+#define GIRBIL_115200 0x36
+
+/* Mode register (0x4) */
+#define GIRBIL_IRDA 0x40
+#define GIRBIL_ASK 0x41
+
+/* Control register 2 (0x5) */
+#define GIRBIL_LOAD 0x51 /* Load the new baud rate value */
+
+static struct dongle_driver girbil = {
+ .owner = THIS_MODULE,
+ .driver_name = "Greenwich GIrBIL",
+ .type = IRDA_GIRBIL_DONGLE,
+ .open = girbil_open,
+ .close = girbil_close,
+ .reset = girbil_reset,
+ .set_speed = girbil_change_speed,
+};
+
+static int __init girbil_sir_init(void)
+{
+ return irda_register_dongle(&girbil);
+}
+
+static void __exit girbil_sir_cleanup(void)
+{
+ irda_unregister_dongle(&girbil);
+}
+
+static int girbil_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ /* Power on dongle */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x03;
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int girbil_close(struct sir_dev *dev)
+{
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function girbil_change_speed (dev, speed)
+ *
+ * Set the speed for the Girbil type dongle.
+ *
+ */
+
+#define GIRBIL_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1)
+
+static int girbil_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ u8 control[2];
+ static int ret = 0;
+
+	/* dongle already reset - port and dongle at default speed */
+
+ switch(state) {
+
+ case SIRDEV_STATE_DONGLE_SPEED:
+
+ /* Set DTR and Clear RTS to enter command mode */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
+ udelay(25); /* better wait a little while */
+
+ ret = 0;
+ switch (speed) {
+ default:
+ ret = -EINVAL;
+ /* fall through */
+ case 9600:
+ control[0] = GIRBIL_9600;
+ break;
+ case 19200:
+ control[0] = GIRBIL_19200;
+ break;
+		case 38400:
+ control[0] = GIRBIL_38400;
+ break;
+ case 57600:
+ control[0] = GIRBIL_57600;
+ break;
+ case 115200:
+ control[0] = GIRBIL_115200;
+ break;
+ }
+ control[1] = GIRBIL_LOAD;
+
+ /* Write control bytes */
+ sirdev_raw_write(dev, control, 2);
+
+ dev->speed = speed;
+
+ state = GIRBIL_STATE_WAIT_SPEED;
+ delay = 100;
+ break;
+
+ case GIRBIL_STATE_WAIT_SPEED:
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ udelay(25); /* better wait a little while */
+ break;
+
+ default:
+ net_err_ratelimited("%s - undefined state %d\n",
+ __func__, state);
+ ret = -EINVAL;
+ break;
+ }
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+/*
+ * Function girbil_reset (driver)
+ *
+ * This function resets the girbil dongle.
+ *
+ * Algorithm:
+ * 0. set RTS, and wait at least 5 ms
+ * 1. clear RTS
+ */
+
+
+#define GIRBIL_STATE_WAIT1_RESET (SIRDEV_STATE_DONGLE_RESET + 1)
+#define GIRBIL_STATE_WAIT2_RESET (SIRDEV_STATE_DONGLE_RESET + 2)
+#define GIRBIL_STATE_WAIT3_RESET (SIRDEV_STATE_DONGLE_RESET + 3)
+
+static int girbil_reset(struct sir_dev *dev)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ u8 control = GIRBIL_TXEN | GIRBIL_RXEN;
+ int ret = 0;
+
+ switch (state) {
+ case SIRDEV_STATE_DONGLE_RESET:
+ /* Reset dongle */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+ /* Sleep at least 5 ms */
+ delay = 20;
+ state = GIRBIL_STATE_WAIT1_RESET;
+ break;
+
+ case GIRBIL_STATE_WAIT1_RESET:
+ /* Set DTR and clear RTS to enter command mode */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+ delay = 20;
+ state = GIRBIL_STATE_WAIT2_RESET;
+ break;
+
+ case GIRBIL_STATE_WAIT2_RESET:
+ /* Write control byte */
+ sirdev_raw_write(dev, &control, 1);
+ delay = 20;
+ state = GIRBIL_STATE_WAIT3_RESET;
+ break;
+
+ case GIRBIL_STATE_WAIT3_RESET:
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ dev->speed = 9600;
+ break;
+
+ default:
+ net_err_ratelimited("%s(), undefined state %d\n",
+ __func__, state);
+ ret = -1;
+ break;
+ }
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Greenwich GIrBIL dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-4"); /* IRDA_GIRBIL_DONGLE */
+
+module_init(girbil_sir_init);
+module_exit(girbil_sir_cleanup);
diff --git a/drivers/staging/irda/drivers/irda-usb.c b/drivers/staging/irda/drivers/irda-usb.c
new file mode 100644
index 000000000000..723e49bc4baa
--- /dev/null
+++ b/drivers/staging/irda/drivers/irda-usb.c
@@ -0,0 +1,1914 @@
+/*****************************************************************************
+ *
+ * Filename: irda-usb.c
+ * Version: 0.10
+ * Description: IrDA-USB Driver
+ * Status: Experimental
+ * Author: Dag Brattli <dag@brattli.net>
+ *
+ * Copyright (C) 2000, Roman Weissgaerber <weissg@vienna.at>
+ * Copyright (C) 2001, Dag Brattli <dag@brattli.net>
+ * Copyright (C) 2001, Jean Tourrilhes <jt@hpl.hp.com>
+ * Copyright (C) 2004, SigmaTel, Inc. <irquality@sigmatel.com>
+ * Copyright (C) 2005, Milan Beno <beno@pobox.sk>
+ * Copyright (C) 2006, Nick Fedchik <nick@fedchik.org.ua>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+/*
+ * IMPORTANT NOTE
+ * --------------
+ *
+ * As of kernel 2.5.20, this is the state of compliance and testing of
+ * this driver (irda-usb) with regards to the USB low level drivers...
+ *
+ * This driver has been tested SUCCESSFULLY with the following drivers :
+ * o usb-uhci-hcd (For Intel/Via USB controllers)
+ * o uhci-hcd (Alternate/JE driver for Intel/Via USB controllers)
+ * o ohci-hcd (For other USB controllers)
+ *
+ * This driver has NOT been tested with the following drivers :
+ * o ehci-hcd (USB 2.0 controllers)
+ *
+ * Note that all HCD drivers do URB_ZERO_PACKET and timeout properly,
+ * so we don't have to worry about that anymore.
+ * One common problem is the failure to set the address on the dongle,
+ * but this happens before the driver gets loaded...
+ *
+ * Jean II
+ */
+
+/*------------------------------------------------------------------*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/rtnetlink.h>
+#include <linux/usb.h>
+#include <linux/firmware.h>
+
+#include "irda-usb.h"
+
+/*------------------------------------------------------------------*/
+
+static int qos_mtt_bits = 0;
+
+/* These are the currently known IrDA USB dongles. Add new dongles here */
+static const struct usb_device_id dongles[] = {
+ /* ACTiSYS Corp., ACT-IR2000U FIR-USB Adapter */
+ { USB_DEVICE(0x9c4, 0x011), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
+ /* Look like ACTiSYS, Report : IBM Corp., IBM UltraPort IrDA */
+ { USB_DEVICE(0x4428, 0x012), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
+ /* KC Technology Inc., KC-180 USB IrDA Device */
+ { USB_DEVICE(0x50f, 0x180), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
+ /* Extended Systems, Inc., XTNDAccess IrDA USB (ESI-9685) */
+ { USB_DEVICE(0x8e9, 0x100), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
+ /* SigmaTel STIR4210/4220/4116 USB IrDA (VFIR) Bridge */
+ { USB_DEVICE(0x66f, 0x4210), .driver_info = IUC_STIR421X | IUC_SPEED_BUG },
+ { USB_DEVICE(0x66f, 0x4220), .driver_info = IUC_STIR421X | IUC_SPEED_BUG },
+ { USB_DEVICE(0x66f, 0x4116), .driver_info = IUC_STIR421X | IUC_SPEED_BUG },
+ { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .bInterfaceClass = USB_CLASS_APP_SPEC,
+ .bInterfaceSubClass = USB_CLASS_IRDA,
+ .driver_info = IUC_DEFAULT, },
+ { }, /* The end */
+};
+
+/*
+ * Important note :
+ * Devices based on the SigmaTel chipset (0x66f, 0x4200) are not designed
+ * using the "USB-IrDA specification" (yes, there exists such a thing), and
+ * therefore not supported by this driver (don't add them above).
+ * There is a Linux driver, stir4200, that supports those USB devices.
+ * Jean II
+ */
+
+MODULE_DEVICE_TABLE(usb, dongles);
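
Should yet another VID/PID dongle need to be listed, a new row would follow the same pattern as the entries above, placed before the class-match catch-all and the terminating entry; the IDs below are placeholders, not a real device.

/* hypothetical example only -- 0x1234/0x5678 is a placeholder VID/PID */
{ USB_DEVICE(0x1234, 0x5678), .driver_info = IUC_DEFAULT },
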
+
+/*------------------------------------------------------------------*/
+
+static void irda_usb_init_qos(struct irda_usb_cb *self) ;
+static struct irda_class_desc *irda_usb_find_class_desc(struct usb_interface *intf);
+static void irda_usb_disconnect(struct usb_interface *intf);
+static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self);
+static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static int irda_usb_open(struct irda_usb_cb *self);
+static void irda_usb_close(struct irda_usb_cb *self);
+static void speed_bulk_callback(struct urb *urb);
+static void write_bulk_callback(struct urb *urb);
+static void irda_usb_receive(struct urb *urb);
+static void irda_usb_rx_defer_expired(unsigned long data);
+static int irda_usb_net_open(struct net_device *dev);
+static int irda_usb_net_close(struct net_device *dev);
+static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void irda_usb_net_timeout(struct net_device *dev);
+
+/************************ TRANSMIT ROUTINES ************************/
+/*
+ * Receive packets from the IrDA stack and send them on the USB pipe.
+ * Handle speed change, timeout and lots of ugliness...
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_build_header(self, header, force)
+ *
+ * Builds USB-IrDA outbound header
+ *
+ * When we send an IrDA frame over a USB pipe, we add to it a 1 byte
+ * header. This function creates this header with the proper values.
+ *
+ * Important note : the USB-IrDA spec 1.0 says very clearly in chapter 5.4.2.2
+ * that the setting of the link speed and xbof number in this outbound header
+ * should be applied *AFTER* the frame has been sent.
+ * Unfortunately, some devices are not compliant with that... It seems that
+ * reading the spec is far too difficult...
+ * Jean II
+ */
+static void irda_usb_build_header(struct irda_usb_cb *self,
+ __u8 *header,
+ int force)
+{
+ /* Here we check if we have an STIR421x chip,
+ * and if either speed or xbofs (or both) needs
+ * to be changed.
+ */
+ if (self->capability & IUC_STIR421X &&
+ ((self->new_speed != -1) || (self->new_xbofs != -1))) {
+
+ /* With STIR421x, speed and xBOFs must be set at the same
+ * time, even if only one of them changes.
+ */
+ if (self->new_speed == -1)
+ self->new_speed = self->speed ;
+
+ if (self->new_xbofs == -1)
+ self->new_xbofs = self->xbofs ;
+ }
+
+ /* Set the link speed */
+ if (self->new_speed != -1) {
+ /* Hum... Ugly hack :-(
+		 * Some devices are not compliant with the spec and change
+ * parameters *before* sending the frame. - Jean II
+ */
+ if ((self->capability & IUC_SPEED_BUG) &&
+ (!force) && (self->speed != -1)) {
+ /* No speed and xbofs change here
+ * (we'll do it later in the write callback) */
+ pr_debug("%s(), not changing speed yet\n", __func__);
+ *header = 0;
+ return;
+ }
+
+ pr_debug("%s(), changing speed to %d\n",
+ __func__, self->new_speed);
+ self->speed = self->new_speed;
+ /* We will do ` self->new_speed = -1; ' in the completion
+		 * handler just in case the current URB fails - Jean II */
+
+ switch (self->speed) {
+ case 2400:
+ *header = SPEED_2400;
+ break;
+ default:
+ case 9600:
+ *header = SPEED_9600;
+ break;
+ case 19200:
+ *header = SPEED_19200;
+ break;
+ case 38400:
+ *header = SPEED_38400;
+ break;
+ case 57600:
+ *header = SPEED_57600;
+ break;
+ case 115200:
+ *header = SPEED_115200;
+ break;
+ case 576000:
+ *header = SPEED_576000;
+ break;
+ case 1152000:
+ *header = SPEED_1152000;
+ break;
+ case 4000000:
+ *header = SPEED_4000000;
+ self->new_xbofs = 0;
+ break;
+ case 16000000:
+ *header = SPEED_16000000;
+ self->new_xbofs = 0;
+ break;
+ }
+ } else
+ /* No change */
+ *header = 0;
+
+ /* Set the negotiated additional XBOFS */
+ if (self->new_xbofs != -1) {
+ pr_debug("%s(), changing xbofs to %d\n",
+ __func__, self->new_xbofs);
+ self->xbofs = self->new_xbofs;
+ /* We will do ` self->new_xbofs = -1; ' in the completion
+		 * handler just in case the current URB fails - Jean II */
+
+ switch (self->xbofs) {
+ case 48:
+ *header |= 0x10;
+ break;
+ case 28:
+ case 24: /* USB spec 1.0 says 24 */
+ *header |= 0x20;
+ break;
+ default:
+ case 12:
+ *header |= 0x30;
+ break;
+ case 5: /* Bug in IrLAP spec? (should be 6) */
+ case 6:
+ *header |= 0x40;
+ break;
+ case 3:
+ *header |= 0x50;
+ break;
+ case 2:
+ *header |= 0x60;
+ break;
+ case 1:
+ *header |= 0x70;
+ break;
+ case 0:
+ *header |= 0x80;
+ break;
+ }
+ }
+}
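
As a worked illustration of the header byte assembled above (hedged: the numeric SPEED_* codes are defined in irda-usb.h and are not visible in this hunk):

/* e.g. switching to 9600 baud with 12 additional xbofs */
__u8 header = SPEED_9600;	/* speed code */
header |= 0x30;			/* 12 xbofs in the upper bits */

/* at 4 Mb/s the function above forces new_xbofs to 0, giving */
header = SPEED_4000000 | 0x80;
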
+
+/*
+* calculate turnaround time for SigmaTel header
+*/
+static __u8 get_turnaround_time(struct sk_buff *skb)
+{
+ int turnaround_time = irda_get_mtt(skb);
+
+ if ( turnaround_time == 0 )
+ return 0;
+ else if ( turnaround_time <= 10 )
+ return 1;
+ else if ( turnaround_time <= 50 )
+ return 2;
+ else if ( turnaround_time <= 100 )
+ return 3;
+ else if ( turnaround_time <= 500 )
+ return 4;
+ else if ( turnaround_time <= 1000 )
+ return 5;
+ else if ( turnaround_time <= 5000 )
+ return 6;
+ else
+ return 7;
+}
+
+
+/*------------------------------------------------------------------*/
+/*
+ * Send a command to change the speed of the dongle
+ * Need to be called with spinlock on.
+ * Needs to be called with the spinlock held.
+static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
+{
+ __u8 *frame;
+ struct urb *urb;
+ int ret;
+
+ pr_debug("%s(), speed=%d, xbofs=%d\n", __func__,
+ self->new_speed, self->new_xbofs);
+
+ /* Grab the speed URB */
+ urb = self->speed_urb;
+ if (urb->status != 0) {
+ net_warn_ratelimited("%s(), URB still in use!\n", __func__);
+ return;
+ }
+
+ /* Allocate the fake frame */
+ frame = self->speed_buff;
+
+ /* Set the new speed and xbofs in this fake frame */
+ irda_usb_build_header(self, frame, 1);
+
+ if (self->capability & IUC_STIR421X) {
+ if (frame[0] == 0) return ; // do nothing if no change
+ frame[1] = 0; // other parameters don't change here
+ frame[2] = 0;
+ }
+
+ /* Submit the 0 length IrDA frame to trigger new speed settings */
+ usb_fill_bulk_urb(urb, self->usbdev,
+ usb_sndbulkpipe(self->usbdev, self->bulk_out_ep),
+ frame, IRDA_USB_SPEED_MTU,
+ speed_bulk_callback, self);
+ urb->transfer_buffer_length = self->header_length;
+ urb->transfer_flags = 0;
+
+ /* Irq disabled -> GFP_ATOMIC */
+ if ((ret = usb_submit_urb(urb, GFP_ATOMIC))) {
+ net_warn_ratelimited("%s(), failed Speed URB\n", __func__);
+ }
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Speed URB callback
+ * Now, we can only get called for the speed URB.
+ */
+static void speed_bulk_callback(struct urb *urb)
+{
+ struct irda_usb_cb *self = urb->context;
+
+ /* We should always have a context */
+ IRDA_ASSERT(self != NULL, return;);
+ /* We should always be called for the speed URB */
+ IRDA_ASSERT(urb == self->speed_urb, return;);
+
+ /* Check for timeout and other USB nasties */
+ if (urb->status != 0) {
+ /* I get a lot of -ECONNABORTED = -103 here - Jean II */
+ pr_debug("%s(), URB complete status %d, transfer_flags 0x%04X\n",
+ __func__, urb->status, urb->transfer_flags);
+
+ /* Don't do anything here, that might confuse the USB layer.
+ * Instead, we will wait for irda_usb_net_timeout(), the
+ * network layer watchdog, to fix the situation.
+ * Jean II */
+ /* A reset of the dongle might be welcomed here - Jean II */
+ return;
+ }
+
+ /* urb is now available */
+ //urb->status = 0; -> tested above
+
+ /* New speed and xbof is now committed in hardware */
+ self->new_speed = -1;
+ self->new_xbofs = -1;
+
+ /* Allow the stack to send more packets */
+ netif_wake_queue(self->netdev);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Send an IrDA frame to the USB dongle (for transmission)
+ */
+static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct irda_usb_cb *self = netdev_priv(netdev);
+ struct urb *urb = self->tx_urb;
+ unsigned long flags;
+ s32 speed;
+ s16 xbofs;
+ int res, mtt;
+
+ pr_debug("%s() on %s\n", __func__, netdev->name);
+
+ netif_stop_queue(netdev);
+
+ /* Protect us from USB callbacks, net watchdog and else. */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Check if the device is still there.
+	 * We need to check self->present under the spinlock because
+	 * irda_usb_disconnect() is synchronous - Jean II */
+ if (!self->present) {
+ pr_debug("%s(), Device is gone...\n", __func__);
+ goto drop;
+ }
+
+ /* Check if we need to change the number of xbofs */
+ xbofs = irda_get_next_xbofs(skb);
+ if ((xbofs != self->xbofs) && (xbofs != -1)) {
+ self->new_xbofs = xbofs;
+ }
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->speed) && (speed != -1)) {
+ /* Set the desired speed */
+ self->new_speed = speed;
+
+ /* Check for empty frame */
+ if (!skb->len) {
+			/* IrLAP sends us an empty frame to make us change the
+ * speed. Changing speed with the USB adapter is in
+ * fact sending an empty frame to the adapter, so we
+ * could just let the present function do its job.
+ * However, we would wait for min turn time,
+ * do an extra memcpy and increment packet counters...
+ * Jean II */
+ irda_usb_change_speed_xbofs(self);
+ netif_trans_update(netdev);
+ /* Will netif_wake_queue() in callback */
+ goto drop;
+ }
+ }
+
+ if (urb->status != 0) {
+ net_warn_ratelimited("%s(), URB still in use!\n", __func__);
+ goto drop;
+ }
+
+ skb_copy_from_linear_data(skb, self->tx_buff + self->header_length, skb->len);
+
+ /* Change setting for next frame */
+ if (self->capability & IUC_STIR421X) {
+ __u8 turnaround_time;
+ __u8* frame = self->tx_buff;
+ turnaround_time = get_turnaround_time( skb );
+ irda_usb_build_header(self, frame, 0);
+ frame[2] = turnaround_time;
+ if ((skb->len != 0) &&
+ ((skb->len % 128) == 0) &&
+ ((skb->len % 512) != 0)) {
+ /* add extra byte for special SigmaTel feature */
+ frame[1] = 1;
+ skb_put(skb, 1);
+ } else {
+ frame[1] = 0;
+ }
+ } else {
+ irda_usb_build_header(self, self->tx_buff, 0);
+ }
+
+ /* FIXME: Make macro out of this one */
+ ((struct irda_skb_cb *)skb->cb)->context = self;
+
+ usb_fill_bulk_urb(urb, self->usbdev,
+ usb_sndbulkpipe(self->usbdev, self->bulk_out_ep),
+ self->tx_buff, skb->len + self->header_length,
+ write_bulk_callback, skb);
+
+ /* This flag (URB_ZERO_PACKET) indicates that what we send is not
+ * a continuous stream of data but separate packets.
+ * In this case, the USB layer will insert an empty USB frame (TD)
+	 * after each of our packets that is an exact multiple of the frame size.
+ * This is how the dongle will detect the end of packet - Jean II */
+ urb->transfer_flags = URB_ZERO_PACKET;
+
+ /* Generate min turn time. FIXME: can we do better than this? */
+	/* Trying to enforce a turnaround time at this level is like trying to
+	 * measure a processor clock cycle with a wrist-watch: approximate at best...
+ *
+ * What we know is the last time we received a frame over USB.
+	 * Due to latency over USB that depends on the USB load, we don't
+ * know when this frame was received over IrDA (a few ms before ?)
+ * Then, same story for our outgoing frame...
+ *
+ * In theory, the USB dongle is supposed to handle the turnaround
+	 * by itself (spec 1.0, chapter 4, page 6). Who knows ??? That's
+	 * why this code is enabled only for dongles that don't meet
+ * the spec.
+ * Jean II */
+ if (self->capability & IUC_NO_TURN) {
+ mtt = irda_get_mtt(skb);
+ if (mtt) {
+ int diff;
+ diff = ktime_us_delta(ktime_get(), self->stamp);
+#ifdef IU_USB_MIN_RTT
+ /* Factor in USB delays -> Get rid of udelay() that
+ * would be lost in the noise - Jean II */
+ diff += IU_USB_MIN_RTT;
+#endif /* IU_USB_MIN_RTT */
+
+ /* Check if the mtt is larger than the time we have
+ * already used by all the protocol processing
+ */
+ if (mtt > diff) {
+ mtt -= diff;
+ if (mtt > 1000)
+ mdelay(mtt/1000);
+ else
+ udelay(mtt);
+ }
+ }
+ }
+
+ /* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */
+ if ((res = usb_submit_urb(urb, GFP_ATOMIC))) {
+ net_warn_ratelimited("%s(), failed Tx URB\n", __func__);
+ netdev->stats.tx_errors++;
+ /* Let USB recover : We will catch that in the watchdog */
+ /*netif_start_queue(netdev);*/
+ } else {
+ /* Increment packet stats */
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+
+ netif_trans_update(netdev);
+ }
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ return NETDEV_TX_OK;
+
+drop:
+ /* Drop silently the skb and exit */
+ dev_kfree_skb(skb);
+ spin_unlock_irqrestore(&self->lock, flags);
+ return NETDEV_TX_OK;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Note : this function will be called only for tx_urb...
+ */
+static void write_bulk_callback(struct urb *urb)
+{
+ unsigned long flags;
+ struct sk_buff *skb = urb->context;
+ struct irda_usb_cb *self = ((struct irda_skb_cb *) skb->cb)->context;
+
+ /* We should always have a context */
+ IRDA_ASSERT(self != NULL, return;);
+	/* We should always be called for the Tx URB */
+ IRDA_ASSERT(urb == self->tx_urb, return;);
+
+ /* Free up the skb */
+ dev_kfree_skb_any(skb);
+ urb->context = NULL;
+
+ /* Check for timeout and other USB nasties */
+ if (urb->status != 0) {
+ /* I get a lot of -ECONNABORTED = -103 here - Jean II */
+ pr_debug("%s(), URB complete status %d, transfer_flags 0x%04X\n",
+ __func__, urb->status, urb->transfer_flags);
+
+ /* Don't do anything here, that might confuse the USB layer,
+ * and we could go in recursion and blow the kernel stack...
+ * Instead, we will wait for irda_usb_net_timeout(), the
+ * network layer watchdog, to fix the situation.
+ * Jean II */
+ /* A reset of the dongle might be welcomed here - Jean II */
+ return;
+ }
+
+ /* urb is now available */
+ //urb->status = 0; -> tested above
+
+ /* Make sure we read self->present properly */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* If the network is closed, stop everything */
+ if ((!self->netopen) || (!self->present)) {
+ pr_debug("%s(), Network is gone...\n", __func__);
+ spin_unlock_irqrestore(&self->lock, flags);
+ return;
+ }
+
+ /* If a speed or xbofs change is pending... */
+ if ((self->new_speed != -1) || (self->new_xbofs != -1)) {
+ if ((self->new_speed != self->speed) ||
+ (self->new_xbofs != self->xbofs)) {
+ /* We haven't changed speed yet (because of
+ * IUC_SPEED_BUG), so do it now - Jean II */
+ pr_debug("%s(), Changing speed now...\n", __func__);
+ irda_usb_change_speed_xbofs(self);
+ } else {
+ /* The new speed and xbofs are now committed in hardware */
+ self->new_speed = -1;
+ self->new_xbofs = -1;
+ /* Done, waiting for next packet */
+ netif_wake_queue(self->netdev);
+ }
+ } else {
+ /* Otherwise, allow the stack to send more packets */
+ netif_wake_queue(self->netdev);
+ }
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Watchdog timer from the network layer.
+ * After a predetermined timeout, if we don't give confirmation that
+ * the packet has been sent (i.e. no call to netif_wake_queue()),
+ * the network layer will call this function.
+ * Note that the URBs we submit also have a timeout. When the URB timeout
+ * expires, the normal URB callback (write_bulk_callback()) is called.
+ */
+static void irda_usb_net_timeout(struct net_device *netdev)
+{
+ unsigned long flags;
+ struct irda_usb_cb *self = netdev_priv(netdev);
+ struct urb *urb;
+ int done = 0; /* If we have made any progress */
+
+ pr_debug("%s(), Network layer thinks we timed out!\n", __func__);
+ IRDA_ASSERT(self != NULL, return;);
+
+ /* Protect us from USB callbacks, net Tx and else. */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* self->present *MUST* be read under spinlock */
+ if (!self->present) {
+ net_warn_ratelimited("%s(), device not present!\n", __func__);
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&self->lock, flags);
+ return;
+ }
+
+ /* Check speed URB */
+ urb = self->speed_urb;
+ if (urb->status != 0) {
+ pr_debug("%s: Speed change timed out, urb->status=%d, urb->transfer_flags=0x%04X\n",
+ netdev->name, urb->status, urb->transfer_flags);
+
+ switch (urb->status) {
+ case -EINPROGRESS:
+ usb_unlink_urb(urb);
+ /* Note : above will *NOT* call netif_wake_queue()
+ * in completion handler, we will come back here.
+ * Jean II */
+ done = 1;
+ break;
+ case -ECONNRESET:
+ case -ENOENT: /* urb unlinked by us */
+ default: /* ??? - Play safe */
+ urb->status = 0;
+ netif_wake_queue(self->netdev);
+ done = 1;
+ break;
+ }
+ }
+
+ /* Check Tx URB */
+ urb = self->tx_urb;
+ if (urb->status != 0) {
+ struct sk_buff *skb = urb->context;
+
+ pr_debug("%s: Tx timed out, urb->status=%d, urb->transfer_flags=0x%04X\n",
+ netdev->name, urb->status, urb->transfer_flags);
+
+ /* Increase error count */
+ netdev->stats.tx_errors++;
+
+#ifdef IU_BUG_KICK_TIMEOUT
+ /* Can't be a bad idea to reset the speed ;-) - Jean II */
+ if(self->new_speed == -1)
+ self->new_speed = self->speed;
+ if(self->new_xbofs == -1)
+ self->new_xbofs = self->xbofs;
+ irda_usb_change_speed_xbofs(self);
+#endif /* IU_BUG_KICK_TIMEOUT */
+
+ switch (urb->status) {
+ case -EINPROGRESS:
+ usb_unlink_urb(urb);
+ /* Note : above will *NOT* call netif_wake_queue()
+ * in completion handler, because urb->status will
+ * be -ENOENT. We will fix that at the next watchdog,
+ * leaving more time to USB to recover...
+ * Jean II */
+ done = 1;
+ break;
+ case -ECONNRESET:
+ case -ENOENT: /* urb unlinked by us */
+ default: /* ??? - Play safe */
+ if(skb != NULL) {
+ dev_kfree_skb_any(skb);
+ urb->context = NULL;
+ }
+ urb->status = 0;
+ netif_wake_queue(self->netdev);
+ done = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ /* Maybe we need a reset */
+ /* Note : Some drivers seem to use a usb_set_interface() when they
+ * need to reset the hardware. Hum...
+ */
+
+ /* if(done == 0) */
+}
+
+/************************* RECEIVE ROUTINES *************************/
+/*
+ * Receive packets from the USB layer stack and pass them to the IrDA stack.
+ * Try to work around USB failures...
+ */
+
+/*
+ * Note :
+ * Some of you may have noticed that most dongles have an interrupt-in pipe
+ * that we don't use. Here is the little secret...
+ * When we hang a Rx URB on the bulk in pipe, it generates some USB traffic
+ * in every USB frame. This is unnecessary overhead.
+ * The interrupt in pipe will generate an event every time a packet is
+ * received. Reading an interrupt pipe adds minimal overhead, but has some
+ * latency (~1ms).
+ * If we are connected (speed != 9600), we want to minimise latency, so
+ * we just always hang the Rx URB and ignore the interrupt.
+ * If we are not connected (speed == 9600), there is usually no Rx traffic,
+ * and we want to minimise the USB overhead. In this case we should wait
+ * on the interrupt pipe and hang the Rx URB only when an interrupt is
+ * received.
+ * Jean II
+ *
+ * Note : don't read the above as what we are currently doing, but as
+ * something we could do with KC dongle. Also don't forget that the
+ * interrupt pipe is not part of the original standard, so this would
+ * need to be optional...
+ * Jean II
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Submit a Rx URB to the USB layer to handle reception of a frame
+ * Mostly called by the completion callback of the previous URB.
+ *
+ * Jean II
+ */
+static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struct urb *urb)
+{
+ struct irda_skb_cb *cb;
+ int ret;
+
+ /* This should never happen */
+ IRDA_ASSERT(skb != NULL, return;);
+ IRDA_ASSERT(urb != NULL, return;);
+
+ /* Save ourselves in the skb */
+ cb = (struct irda_skb_cb *) skb->cb;
+ cb->context = self;
+
+ /* Reinitialize URB */
+ usb_fill_bulk_urb(urb, self->usbdev,
+ usb_rcvbulkpipe(self->usbdev, self->bulk_in_ep),
+ skb->data, IRDA_SKB_MAX_MTU,
+ irda_usb_receive, skb);
+ urb->status = 0;
+
+ /* Can be called from irda_usb_receive (irq handler) -> GFP_ATOMIC */
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ /* If this ever happens, we are in deep s***.
+ * Basically, the Rx path will stop... */
+ net_warn_ratelimited("%s(), Failed to submit Rx URB %d\n",
+ __func__, ret);
+ }
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_receive(urb)
+ *
+ * Called by the USB subsystem when a frame has been received
+ *
+ */
+static void irda_usb_receive(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct irda_usb_cb *self;
+ struct irda_skb_cb *cb;
+ struct sk_buff *newskb;
+ struct sk_buff *dataskb;
+ struct urb *next_urb;
+ unsigned int len, docopy;
+
+ pr_debug("%s(), len=%d\n", __func__, urb->actual_length);
+
+ /* Find ourselves */
+ cb = (struct irda_skb_cb *) skb->cb;
+ IRDA_ASSERT(cb != NULL, return;);
+ self = (struct irda_usb_cb *) cb->context;
+ IRDA_ASSERT(self != NULL, return;);
+
+ /* If the network is closed or the device gone, stop everything */
+ if ((!self->netopen) || (!self->present)) {
+ pr_debug("%s(), Network is gone!\n", __func__);
+ /* Don't re-submit the URB : will stall the Rx path */
+ return;
+ }
+
+ /* Check the status */
+ if (urb->status != 0) {
+ switch (urb->status) {
+ case -EILSEQ:
+ self->netdev->stats.rx_crc_errors++;
+ /* Also precursor to a hot-unplug on UHCI. */
+ /* Fallthrough... */
+ case -ECONNRESET:
+ /* Random error, if I remember correctly */
+ /* uhci_cleanup_unlink() is going to kill the Rx
+ * URB just after we return. No problem, at this
+ * point the URB will be idle ;-) - Jean II */
+ case -ESHUTDOWN:
+ /* That's usually a hot-unplug. Submit will fail... */
+ case -ETIME:
+ /* Usually precursor to a hot-unplug on OHCI. */
+ default:
+ self->netdev->stats.rx_errors++;
+ pr_debug("%s(), RX status %d, transfer_flags 0x%04X\n",
+ __func__, urb->status, urb->transfer_flags);
+ break;
+ }
+ /* If we received an error, we don't want to resubmit the
+ * Rx URB straight away but to give the USB layer a little
+ * bit of breathing room.
+ * We are in the USB thread context, therefore there is a
+ * danger of recursion (new URB we submit fails, we come
+ * back here).
+ * With recent USB stack (2.6.15+), I'm seeing that on
+ * hot unplug of the dongle...
+ * Lowest effective timer is 10ms...
+ * Jean II */
+ self->rx_defer_timer.function = irda_usb_rx_defer_expired;
+ self->rx_defer_timer.data = (unsigned long) urb;
+ mod_timer(&self->rx_defer_timer,
+ jiffies + msecs_to_jiffies(10));
+
+ return;
+ }
+
+ /* Check for empty frames */
+ if (urb->actual_length <= self->header_length) {
+ net_warn_ratelimited("%s(), empty frame!\n", __func__);
+ goto done;
+ }
+
+ /*
+ * Remember the time we received this frame, so we can
+ * reduce the min turn time a bit since we will know
+ * how much time we have used for protocol processing
+ */
+ self->stamp = ktime_get();
+
+ /* Check if we need to copy the data to a new skb or not.
+ * For most frames, we use ZeroCopy and pass the already
+ * allocated skb up the stack.
+ * If the frame is small, it is more efficient to copy it
+ * to save memory (copy will be fast anyway - that's
+ * called Rx-copy-break). Jean II */
+ docopy = (urb->actual_length < IRDA_RX_COPY_THRESHOLD);
+
+ /* Allocate a new skb */
+ if (self->capability & IUC_STIR421X)
+ newskb = dev_alloc_skb(docopy ? urb->actual_length :
+ IRDA_SKB_MAX_MTU +
+ USB_IRDA_STIR421X_HEADER);
+ else
+ newskb = dev_alloc_skb(docopy ? urb->actual_length :
+ IRDA_SKB_MAX_MTU);
+
+ if (!newskb) {
+ self->netdev->stats.rx_dropped++;
+ /* We could deliver the current skb, but this would stall
+ * the Rx path. Better drop the packet... Jean II */
+ goto done;
+ }
+
+ /* Make sure the IP header gets aligned (IrDA header is 5 bytes) */
+ /* But IrDA-USB header is 1 byte. Jean II */
+ //skb_reserve(newskb, USB_IRDA_HEADER - 1);
+
+ if(docopy) {
+ /* Copy packet, so we can recycle the original */
+ skb_copy_from_linear_data(skb, newskb->data, urb->actual_length);
+ /* Deliver this new skb */
+ dataskb = newskb;
+ /* And hook the old skb to the URB
+ * Note : we don't need to "clean up" the old skb,
+ * as we never touched it. Jean II */
+ } else {
+ /* We are using ZeroCopy. Deliver old skb */
+ dataskb = skb;
+ /* And hook the new skb to the URB */
+ skb = newskb;
+ }
+
+ /* Set proper length on skb & remove USB-IrDA header */
+ skb_put(dataskb, urb->actual_length);
+ skb_pull(dataskb, self->header_length);
+
+ /* Ask the networking layer to queue the packet for the IrDA stack */
+ dataskb->dev = self->netdev;
+ skb_reset_mac_header(dataskb);
+ dataskb->protocol = htons(ETH_P_IRDA);
+ len = dataskb->len;
+ netif_rx(dataskb);
+
+ /* Keep stats up to date */
+ self->netdev->stats.rx_bytes += len;
+ self->netdev->stats.rx_packets++;
+
+done:
+ /* Note : at this point, the URB we've just received (urb)
+ * is still referenced by the USB layer. For example, if we
+ * have received a -ECONNRESET, uhci_cleanup_unlink() will
+ * continue to process it (in fact, cleaning it up).
+ * If we were to submit this URB, disaster would ensue.
+ * Therefore, we submit our idle URB, and put this URB in our
+ * idle slot....
+ * Jean II */
+ /* Note : with this scheme, we could submit the idle URB before
+ * processing the Rx URB. I don't think it would buy us anything as
+ * we are running in the USB thread context. Jean II */
+ next_urb = self->idle_rx_urb;
+
+ /* Recycle Rx URB : Now, the idle URB is the present one */
+ urb->context = NULL;
+ self->idle_rx_urb = urb;
+
+ /* Submit the idle URB to replace the URB we've just received.
+ * Do it last to avoid race conditions... Jean II */
+ irda_usb_submit(self, skb, next_urb);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * In case of errors, we want the USB layer to have time to recover.
+ * Now, it is time to resubmit our Rx URB...
+ */
+static void irda_usb_rx_defer_expired(unsigned long data)
+{
+ struct urb *urb = (struct urb *) data;
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct irda_usb_cb *self;
+ struct irda_skb_cb *cb;
+ struct urb *next_urb;
+
+ /* Find ourselves */
+ cb = (struct irda_skb_cb *) skb->cb;
+ IRDA_ASSERT(cb != NULL, return;);
+ self = (struct irda_usb_cb *) cb->context;
+ IRDA_ASSERT(self != NULL, return;);
+
+ /* Same stuff as when Rx is done, see above... */
+ next_urb = self->idle_rx_urb;
+ urb->context = NULL;
+ self->idle_rx_urb = urb;
+ irda_usb_submit(self, skb, next_urb);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Callback from the IrDA layer. IrDA wants to know if we have
+ * started receiving anything.
+ */
+static int irda_usb_is_receiving(struct irda_usb_cb *self)
+{
+ /* Note : because of the way UHCI works, it's almost impossible
+ * to get this info. The controller DMAs directly to memory and
+ * signals only when the whole frame is finished. Knowing whether the
+ * first TD of the URB has been filled or not seems like hard work...
+ *
+ * The other solution would be to use the "receiving" command
+ * on the default descriptor with a usb_control_msg(), but that
+ * would add USB traffic and would return result only in the
+ * next USB frame (~1ms).
+ *
+ * I've been told that current dongles send status info on their
+ * interrupt endpoint, and that's what the Windows driver uses
+ * to know this info. Unfortunately, this is not yet in the spec...
+ *
+ * Jean II
+ */
+
+ return 0; /* For now */
+}
+
+#define STIR421X_PATCH_PRODUCT_VER "Product Version: "
+#define STIR421X_PATCH_STMP_TAG "STMP"
+#define STIR421X_PATCH_CODE_OFFSET 512 /* patch image starts before here */
+/* marks end of patch file header (PC DOS text file EOF character) */
+#define STIR421X_PATCH_END_OF_HDR_TAG 0x1A
+#define STIR421X_PATCH_BLOCK_SIZE 1023
+
+/*
+ * Function stir421x_fw_upload (struct irda_usb_cb *self,
+ * unsigned char *patch,
+ * const unsigned int patch_len)
+ *
+ * Upload firmware code to SigmaTel 421X IRDA-USB dongle
+ */
+static int stir421x_fw_upload(struct irda_usb_cb *self,
+ const unsigned char *patch,
+ const unsigned int patch_len)
+{
+ int ret = -ENOMEM;
+ int actual_len = 0;
+ unsigned int i;
+ unsigned int block_size = 0;
+ unsigned char *patch_block;
+
+ patch_block = kzalloc(STIR421X_PATCH_BLOCK_SIZE, GFP_KERNEL);
+ if (patch_block == NULL)
+ return -ENOMEM;
+
+ /* break up patch into 1023-byte sections */
+ for (i = 0; i < patch_len; i += block_size) {
+ block_size = patch_len - i;
+
+ if (block_size > STIR421X_PATCH_BLOCK_SIZE)
+ block_size = STIR421X_PATCH_BLOCK_SIZE;
+
+ /* upload the patch section */
+ memcpy(patch_block, patch + i, block_size);
+
+ ret = usb_bulk_msg(self->usbdev,
+ usb_sndbulkpipe(self->usbdev,
+ self->bulk_out_ep),
+ patch_block, block_size,
+ &actual_len, 500); /* timeout in ms */
+ pr_debug("%s(): Bulk send %u bytes, ret=%d\n",
+ __func__, actual_len, ret);
+
+ if (ret < 0)
+ break;
+
+ mdelay(10);
+ }
+
+ kfree(patch_block);
+
+ return ret;
+}
+
+/*
+ * Function stir421x_patch_device(struct irda_usb_cb *self)
+ *
+ * Get the firmware from userspace using the hotplug request_firmware() call
+ */
+static int stir421x_patch_device(struct irda_usb_cb *self)
+{
+ unsigned int i;
+ int ret;
+ char stir421x_fw_name[12];
+ const struct firmware *fw;
+ const unsigned char *fw_version_ptr; /* pointer to version string */
+ unsigned long fw_version = 0;
+
+ /*
+ * Known firmware patch file names for STIR421x dongles
+ * are "42101001.sb" or "42101002.sb"
+ */
+ sprintf(stir421x_fw_name, "4210%4X.sb",
+ le16_to_cpu(self->usbdev->descriptor.bcdDevice));
+ ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev);
+ if (ret < 0)
+ return ret;
+
+ /* We get a patch from userspace */
+ net_info_ratelimited("%s(): Received firmware %s (%zu bytes)\n",
+ __func__, stir421x_fw_name, fw->size);
+
+ ret = -EINVAL;
+
+ /* Get the bcd product version */
+ if (!memcmp(fw->data, STIR421X_PATCH_PRODUCT_VER,
+ sizeof(STIR421X_PATCH_PRODUCT_VER) - 1)) {
+ fw_version_ptr = fw->data +
+ sizeof(STIR421X_PATCH_PRODUCT_VER) - 1;
+
+ /* Let's check if the product version is dotted */
+ if (fw_version_ptr[3] == '.' &&
+ fw_version_ptr[7] == '.') {
+ unsigned long major, minor, build;
+ major = simple_strtoul(fw_version_ptr, NULL, 10);
+ minor = simple_strtoul(fw_version_ptr + 4, NULL, 10);
+ build = simple_strtoul(fw_version_ptr + 8, NULL, 10);
+
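+ /* Pack the dotted "major.minor.build" string into
+ * BCD-like nibbles so it can be compared with
+ * bcdDevice below. Illustrative example : a header
+ * version of "001.000.0002" maps to 0x1002, i.e.
+ * firmware file 42101002.sb. */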
+ fw_version = (major << 12)
+ + (minor << 8)
+ + ((build / 10) << 4)
+ + (build % 10);
+
+ pr_debug("%s(): Firmware Product version %ld\n",
+ __func__, fw_version);
+ }
+ }
+
+ if (self->usbdev->descriptor.bcdDevice == cpu_to_le16(fw_version)) {
+ /*
+ * If we're here, we've found a correct patch
+ * The actual image starts after the "STMP" keyword
+ * so forward to the firmware header tag
+ */
+ for (i = 0; i < fw->size && fw->data[i] !=
+ STIR421X_PATCH_END_OF_HDR_TAG; i++) ;
+ /* here we check for the out of buffer case */
+ if (i < STIR421X_PATCH_CODE_OFFSET && i < fw->size &&
+ STIR421X_PATCH_END_OF_HDR_TAG == fw->data[i]) {
+ if (!memcmp(fw->data + i + 1, STIR421X_PATCH_STMP_TAG,
+ sizeof(STIR421X_PATCH_STMP_TAG) - 1)) {
+
+ /* We can upload the patch to the target */
+ i += sizeof(STIR421X_PATCH_STMP_TAG);
+ ret = stir421x_fw_upload(self, &fw->data[i],
+ fw->size - i);
+ }
+ }
+ }
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+
+/********************** IRDA DEVICE CALLBACKS **********************/
+/*
+ * Main calls from the IrDA/Network subsystem.
+ * Mostly registering a new irda-usb device and removing it....
+ * We only deal with the IrDA side of the business, the USB side will
+ * be dealt with below...
+ */
+
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_net_open (dev)
+ *
+ * Network device is taken up. Usually this is done by "ifconfig irda0 up"
+ *
+ * Note : don't mess with self->netopen - Jean II
+ */
+static int irda_usb_net_open(struct net_device *netdev)
+{
+ struct irda_usb_cb *self;
+ unsigned long flags;
+ char hwname[16];
+ int i;
+
+ IRDA_ASSERT(netdev != NULL, return -1;);
+ self = netdev_priv(netdev);
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ spin_lock_irqsave(&self->lock, flags);
+ /* Can only open the device if it's there */
+ if(!self->present) {
+ spin_unlock_irqrestore(&self->lock, flags);
+ net_warn_ratelimited("%s(), device not present!\n", __func__);
+ return -1;
+ }
+
+ if(self->needspatch) {
+ spin_unlock_irqrestore(&self->lock, flags);
+ net_warn_ratelimited("%s(), device needs patch\n", __func__);
+ return -EIO ;
+ }
+
+ /* Initialise default speed and xbofs value
+ * (IrLAP will change that soon) */
+ self->speed = -1;
+ self->xbofs = -1;
+ self->new_speed = -1;
+ self->new_xbofs = -1;
+
+ /* To do *before* submitting Rx urbs and starting net Tx queue
+ * Jean II */
+ self->netopen = 1;
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ /*
+ * Now that everything should be initialized properly,
+ * Open new IrLAP layer instance to take care of us...
+ * Note : will send immediately a speed change...
+ */
+ sprintf(hwname, "usb#%d", self->usbdev->devnum);
+ self->irlap = irlap_open(netdev, &self->qos, hwname);
+ IRDA_ASSERT(self->irlap != NULL, return -1;);
+
+ /* Allow IrLAP to send data to us */
+ netif_start_queue(netdev);
+
+ /* We submit all the Rx URBs except for one that we keep idle.
+ * It needs to be initialised before submitting the other URBs, because
+ * in some cases as soon as we submit the URBs the USB layer
+ * will trigger a dummy receive - Jean II */
+ self->idle_rx_urb = self->rx_urb[IU_MAX_ACTIVE_RX_URBS];
+ self->idle_rx_urb->context = NULL;
+
+ /* Now that we can pass data to IrLAP, allow the USB layer
+ * to send us some data... */
+ for (i = 0; i < IU_MAX_ACTIVE_RX_URBS; i++) {
+ struct sk_buff *skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
+ if (!skb) {
+ /* If this ever happens, we are in deep s***.
+ * Basically, we can't start the Rx path... */
+ return -1;
+ }
+ //skb_reserve(newskb, USB_IRDA_HEADER - 1);
+ irda_usb_submit(self, skb, self->rx_urb[i]);
+ }
+
+ /* Ready to play !!! */
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_net_close (self)
+ *
+ * Network device is taken down. Usually this is done by
+ * "ifconfig irda0 down"
+ */
+static int irda_usb_net_close(struct net_device *netdev)
+{
+ struct irda_usb_cb *self;
+ int i;
+
+ IRDA_ASSERT(netdev != NULL, return -1;);
+ self = netdev_priv(netdev);
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ /* Clear this flag *before* unlinking the urbs and *before*
+ * stopping the network Tx queue - Jean II */
+ self->netopen = 0;
+
+ /* Stop network Tx queue */
+ netif_stop_queue(netdev);
+
+ /* Kill deferred Rx URB */
+ del_timer(&self->rx_defer_timer);
+
+ /* Deallocate all the Rx path buffers (URBs and skb) */
+ for (i = 0; i < self->max_rx_urb; i++) {
+ struct urb *urb = self->rx_urb[i];
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ /* Cancel the receive command */
+ usb_kill_urb(urb);
+ /* The skb is ours, free it */
+ if(skb) {
+ dev_kfree_skb(skb);
+ urb->context = NULL;
+ }
+ }
+ /* Cancel Tx and speed URB - need to be synchronous to avoid races */
+ usb_kill_urb(self->tx_urb);
+ usb_kill_urb(self->speed_urb);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * IOCTLs : Extra out-of-band network commands...
+ */
+static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ unsigned long flags;
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct irda_usb_cb *self;
+ int ret = 0;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ /* Protect us from USB callbacks, net watchdog and else. */
+ spin_lock_irqsave(&self->lock, flags);
+ /* Check if the device is still there */
+ if(self->present) {
+ /* Set the desired speed */
+ self->new_speed = irq->ifr_baudrate;
+ irda_usb_change_speed_xbofs(self);
+ }
+ spin_unlock_irqrestore(&self->lock, flags);
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ /* Check if the IrDA stack is still there */
+ if(self->netopen)
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = irda_usb_is_receiving(self);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+
+/********************* IRDA CONFIG SUBROUTINES *********************/
+/*
+ * Various subroutines dealing with IrDA and network stuff we use to
+ * configure and initialise each irda-usb instance.
+ * These functions are used below in the main calls of the driver...
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Set proper values in the IrDA QOS structure
+ */
+static inline void irda_usb_init_qos(struct irda_usb_cb *self)
+{
+ struct irda_class_desc *desc;
+
+
+ desc = self->irda_desc;
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+ /* See spec section 7.2 for meaning.
+ * Values are little endian (like most USB stuff); the IrDA stack
+ * uses them in native order (see parameters.c). - Jean II */
+ self->qos.baud_rate.bits = le16_to_cpu(desc->wBaudRate);
+ self->qos.min_turn_time.bits = desc->bmMinTurnaroundTime;
+ self->qos.additional_bofs.bits = desc->bmAdditionalBOFs;
+ self->qos.window_size.bits = desc->bmWindowSize;
+ self->qos.data_size.bits = desc->bmDataSize;
+
+ pr_debug("%s(), dongle says speed=0x%X, size=0x%X, window=0x%X, bofs=0x%X, turn=0x%X\n",
+ __func__, self->qos.baud_rate.bits, self->qos.data_size.bits,
+ self->qos.window_size.bits, self->qos.additional_bofs.bits,
+ self->qos.min_turn_time.bits);
+
+ /* Don't always trust what the dongle tells us */
+ if(self->capability & IUC_SIR_ONLY)
+ self->qos.baud_rate.bits &= 0x00ff;
+ if(self->capability & IUC_SMALL_PKT)
+ self->qos.data_size.bits = 0x07;
+ if(self->capability & IUC_NO_WINDOW)
+ self->qos.window_size.bits = 0x01;
+ if(self->capability & IUC_MAX_WINDOW)
+ self->qos.window_size.bits = 0x7f;
+ if(self->capability & IUC_MAX_XBOFS)
+ self->qos.additional_bofs.bits = 0x01;
+
+#if 1
+ /* Module parameter can override the min turn time */
+ if (qos_mtt_bits)
+ self->qos.min_turn_time.bits = qos_mtt_bits;
+#endif
+ /*
+ * Note : most of those values apply only for the receive path,
+ * the transmit path will be set differently - Jean II
+ */
+ irda_qos_bits_to_value(&self->qos);
+}
+
+/*------------------------------------------------------------------*/
+static const struct net_device_ops irda_usb_netdev_ops = {
+ .ndo_open = irda_usb_net_open,
+ .ndo_stop = irda_usb_net_close,
+ .ndo_do_ioctl = irda_usb_net_ioctl,
+ .ndo_start_xmit = irda_usb_hard_xmit,
+ .ndo_tx_timeout = irda_usb_net_timeout,
+};
+
+/*
+ * Initialise the network side of the irda-usb instance
+ * Called when a new USB instance is registered in irda_usb_probe()
+ */
+static inline int irda_usb_open(struct irda_usb_cb *self)
+{
+ struct net_device *netdev = self->netdev;
+
+ netdev->netdev_ops = &irda_usb_netdev_ops;
+
+ irda_usb_init_qos(self);
+
+ return register_netdev(netdev);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Cleanup the network side of the irda-usb instance
+ * Called when a USB instance is removed in irda_usb_disconnect()
+ */
+static inline void irda_usb_close(struct irda_usb_cb *self)
+{
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ /* Remove the speed buffer */
+ kfree(self->speed_buff);
+ self->speed_buff = NULL;
+
+ kfree(self->tx_buff);
+ self->tx_buff = NULL;
+}
+
+/********************** USB CONFIG SUBROUTINES **********************/
+/*
+ * Various subroutines dealing with USB stuff we use to configure and
+ * initialise each irda-usb instance.
+ * These functions are used below in the main calls of the driver...
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_parse_endpoints(self, endpoint, ennum)
+ *
+ * Parse the various endpoints and find the one we need.
+ *
+ * The endpoints are the pipes used to communicate with the USB device.
+ * The spec defines 2 endpoints of type bulk transfer, one in, and one out.
+ * These are used to pass frames back and forth with the dongle.
+ * Most dongles also have an interrupt endpoint, which will probably be
+ * documented in the next spec...
+ */
+static inline int irda_usb_parse_endpoints(struct irda_usb_cb *self, struct usb_host_endpoint *endpoint, int ennum)
+{
+ int i; /* Endpoint index in table */
+
+ /* Init : no endpoints */
+ self->bulk_in_ep = 0;
+ self->bulk_out_ep = 0;
+ self->bulk_int_ep = 0;
+
+ /* Let's look at all those endpoints */
+ for(i = 0; i < ennum; i++) {
+ /* All those variables will get optimised by the compiler,
+ * so let's aim for clarity... - Jean II */
+ __u8 ep; /* Endpoint address */
+ __u8 dir; /* Endpoint direction */
+ __u8 attr; /* Endpoint attribute */
+ __u16 psize; /* Endpoint max packet size in bytes */
+
+ /* Get endpoint address, direction and attribute */
+ ep = endpoint[i].desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+ dir = endpoint[i].desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK;
+ attr = endpoint[i].desc.bmAttributes;
+ psize = le16_to_cpu(endpoint[i].desc.wMaxPacketSize);
+
+ /* Is it a bulk endpoint ??? */
+ if(attr == USB_ENDPOINT_XFER_BULK) {
+ /* We need to find an IN and an OUT */
+ if(dir == USB_DIR_IN) {
+ /* This is our Rx endpoint */
+ self->bulk_in_ep = ep;
+ } else {
+ /* This is our Tx endpoint */
+ self->bulk_out_ep = ep;
+ self->bulk_out_mtu = psize;
+ }
+ } else {
+ if((attr == USB_ENDPOINT_XFER_INT) &&
+ (dir == USB_DIR_IN)) {
+ /* This is our interrupt endpoint */
+ self->bulk_int_ep = ep;
+ } else {
+ net_err_ratelimited("%s(), Unrecognised endpoint %02X\n",
+ __func__, ep);
+ }
+ }
+ }
+
+ pr_debug("%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n",
+ __func__, self->bulk_in_ep, self->bulk_out_ep,
+ self->bulk_out_mtu, self->bulk_int_ep);
+
+ return (self->bulk_in_ep != 0) && (self->bulk_out_ep != 0);
+}
+
+#ifdef IU_DUMP_CLASS_DESC
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_dump_class_desc(desc)
+ *
+ * Prints out the contents of the IrDA class descriptor
+ *
+ */
+static inline void irda_usb_dump_class_desc(struct irda_class_desc *desc)
+{
+ /* Values are little endian */
+ printk("bLength=%x\n", desc->bLength);
+ printk("bDescriptorType=%x\n", desc->bDescriptorType);
+ printk("bcdSpecRevision=%x\n", le16_to_cpu(desc->bcdSpecRevision));
+ printk("bmDataSize=%x\n", desc->bmDataSize);
+ printk("bmWindowSize=%x\n", desc->bmWindowSize);
+ printk("bmMinTurnaroundTime=%d\n", desc->bmMinTurnaroundTime);
+ printk("wBaudRate=%x\n", le16_to_cpu(desc->wBaudRate));
+ printk("bmAdditionalBOFs=%x\n", desc->bmAdditionalBOFs);
+ printk("bIrdaRateSniff=%x\n", desc->bIrdaRateSniff);
+ printk("bMaxUnicastList=%x\n", desc->bMaxUnicastList);
+}
+#endif /* IU_DUMP_CLASS_DESC */
+
+/*------------------------------------------------------------------*/
+/*
+ * Function irda_usb_find_class_desc(intf)
+ *
+ * Returns instance of IrDA class descriptor, or NULL if not found
+ *
+ * The class descriptor is some extra info that IrDA USB devices will
+ * offer to us, describing their IrDA characteristics. We will use that in
+ * irda_usb_init_qos()
+ */
+static inline struct irda_class_desc *irda_usb_find_class_desc(struct usb_interface *intf)
+{
+ struct usb_device *dev = interface_to_usbdev (intf);
+ struct irda_class_desc *desc;
+ int ret;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ /* USB-IrDA class spec 1.0:
+ * 6.1.3: Standard "Get Descriptor" Device Request is not
+ * appropriate to retrieve class-specific descriptor
+ * 6.2.5: Class Specific "Get Class Descriptor" Interface Request
+ * is mandatory and returns the USB-IrDA class descriptor
+ */
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev,0),
+ IU_REQ_GET_CLASS_DESC,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0, intf->altsetting->desc.bInterfaceNumber, desc,
+ sizeof(*desc), 500);
+
+ pr_debug("%s(), ret=%d\n", __func__, ret);
+ if (ret < sizeof(*desc)) {
+ net_warn_ratelimited("usb-irda: class_descriptor read %s (%d)\n",
+ ret < 0 ? "failed" : "too short", ret);
+ }
+ else if (desc->bDescriptorType != USB_DT_IRDA) {
+ net_warn_ratelimited("usb-irda: bad class_descriptor type\n");
+ }
+ else {
+#ifdef IU_DUMP_CLASS_DESC
+ irda_usb_dump_class_desc(desc);
+#endif /* IU_DUMP_CLASS_DESC */
+
+ return desc;
+ }
+ kfree(desc);
+ return NULL;
+}
+
+/*********************** USB DEVICE CALLBACKS ***********************/
+/*
+ * Main calls from the USB subsystem.
+ * Mostly registering a new irda-usb device and removing it....
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine is called by the USB subsystem for each new device
+ * in the system. We need to check if the device is ours, and in
+ * this case start handling it.
+ * The USB layer protects us from reentrancy (via BKL), so we don't need
+ * to spinlock in there... Jean II
+ */
+static int irda_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct net_device *net;
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct irda_usb_cb *self;
+ struct usb_host_interface *interface;
+ struct irda_class_desc *irda_desc;
+ int ret = -ENOMEM;
+ int i; /* Driver instance index / Rx URB index */
+
+ /* Note : the USB core makes sure to call us only for devices that
+ * match the list of dongles (top of the file). So, we
+ * don't need to check if the dongle is really ours.
+ * Jean II */
+
+ net_info_ratelimited("IRDA-USB found at address %d, Vendor: %x, Product: %x\n",
+ dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+
+ net = alloc_irdadev(sizeof(*self));
+ if (!net)
+ goto err_out;
+
+ SET_NETDEV_DEV(net, &intf->dev);
+ self = netdev_priv(net);
+ self->netdev = net;
+ spin_lock_init(&self->lock);
+ init_timer(&self->rx_defer_timer);
+
+ self->capability = id->driver_info;
+ self->needspatch = ((self->capability & IUC_STIR421X) != 0);
+
+ /* Create all of the needed urbs */
+ if (self->capability & IUC_STIR421X) {
+ self->max_rx_urb = IU_SIGMATEL_MAX_RX_URBS;
+ self->header_length = USB_IRDA_STIR421X_HEADER;
+ } else {
+ self->max_rx_urb = IU_MAX_RX_URBS;
+ self->header_length = USB_IRDA_HEADER;
+ }
+
+ self->rx_urb = kcalloc(self->max_rx_urb, sizeof(struct urb *),
+ GFP_KERNEL);
+ if (!self->rx_urb)
+ goto err_free_net;
+
+ for (i = 0; i < self->max_rx_urb; i++) {
+ self->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!self->rx_urb[i]) {
+ goto err_out_1;
+ }
+ }
+ self->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!self->tx_urb) {
+ goto err_out_1;
+ }
+ self->speed_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!self->speed_urb) {
+ goto err_out_2;
+ }
+
+ /* Is this really necessary? (no, except maybe for broken devices) */
+ if (usb_reset_configuration (dev) < 0) {
+ dev_err(&intf->dev, "reset_configuration failed\n");
+ ret = -EIO;
+ goto err_out_3;
+ }
+
+ /* Is this really necessary? */
+ /* Note : some drivers hardcode the interface number, some others
+ * specify an alternate, but very few drivers do it like this.
+ * Jean II */
+ ret = usb_set_interface(dev, intf->altsetting->desc.bInterfaceNumber, 0);
+ pr_debug("usb-irda: set interface %d result %d\n",
+ intf->altsetting->desc.bInterfaceNumber, ret);
+ switch (ret) {
+ case 0:
+ break;
+ case -EPIPE: /* -EPIPE = -32 */
+ /* Martin Diehl says if we get a -EPIPE we should
+ * be fine and we don't need to do a usb_clear_halt().
+ * - Jean II */
+ pr_debug("%s(), Received -EPIPE, ignoring...\n",
+ __func__);
+ break;
+ default:
+ pr_debug("%s(), Unknown error %d\n", __func__, ret);
+ ret = -EIO;
+ goto err_out_3;
+ }
+
+ /* Find our endpoints */
+ interface = intf->cur_altsetting;
+ if(!irda_usb_parse_endpoints(self, interface->endpoint,
+ interface->desc.bNumEndpoints)) {
+ net_err_ratelimited("%s(), Bogus endpoints...\n", __func__);
+ ret = -EIO;
+ goto err_out_3;
+ }
+
+ self->usbdev = dev;
+
+ /* Find IrDA class descriptor */
+ irda_desc = irda_usb_find_class_desc(intf);
+ ret = -ENODEV;
+ if (!irda_desc)
+ goto err_out_3;
+
+ if (self->needspatch) {
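+ /* Vendor-specific device request (bRequestType 0x40 = vendor,
+ * host-to-device; bRequest 0x02), presumably to prepare the
+ * STIR421x for the firmware upload below; the values are
+ * device-specific magic. */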
+ ret = usb_control_msg (self->usbdev, usb_sndctrlpipe (self->usbdev, 0),
+ 0x02, 0x40, 0, 0, NULL, 0, 500);
+ if (ret < 0) {
+ pr_debug("usb_control_msg failed %d\n", ret);
+ goto err_out_3;
+ } else {
+ mdelay(10);
+ }
+ }
+
+ self->irda_desc = irda_desc;
+ self->present = 1;
+ self->netopen = 0;
+ self->usbintf = intf;
+
+ /* Allocate the buffer for speed changes */
+ /* Don't change this buffer size and allocation without doing
+ * some heavy and complete testing. Don't ask why :-(
+ * Jean II */
+ ret = -ENOMEM;
+ self->speed_buff = kzalloc(IRDA_USB_SPEED_MTU, GFP_KERNEL);
+ if (!self->speed_buff)
+ goto err_out_3;
+
+ self->tx_buff = kzalloc(IRDA_SKB_MAX_MTU + self->header_length,
+ GFP_KERNEL);
+ if (!self->tx_buff)
+ goto err_out_4;
+
+ ret = irda_usb_open(self);
+ if (ret)
+ goto err_out_5;
+
+ net_info_ratelimited("IrDA: Registered device %s\n", net->name);
+ usb_set_intfdata(intf, self);
+
+ if (self->needspatch) {
+ /* Now we fetch and upload the firmware patch */
+ ret = stir421x_patch_device(self);
+ self->needspatch = (ret < 0);
+ if (self->needspatch) {
+ net_err_ratelimited("STIR421X: Couldn't upload patch\n");
+ goto err_out_6;
+ }
+
+ /* replace IrDA class descriptor with what patched device is now reporting */
+ irda_desc = irda_usb_find_class_desc (self->usbintf);
+ if (!irda_desc) {
+ ret = -ENODEV;
+ goto err_out_6;
+ }
+ kfree(self->irda_desc);
+ self->irda_desc = irda_desc;
+ irda_usb_init_qos(self);
+ }
+
+ return 0;
+err_out_6:
+ unregister_netdev(self->netdev);
+err_out_5:
+ kfree(self->tx_buff);
+err_out_4:
+ kfree(self->speed_buff);
+err_out_3:
+ /* Free all urbs that we may have created */
+ usb_free_urb(self->speed_urb);
+err_out_2:
+ usb_free_urb(self->tx_urb);
+err_out_1:
+ for (i = 0; i < self->max_rx_urb; i++)
+ usb_free_urb(self->rx_urb[i]);
+ kfree(self->rx_urb);
+err_free_net:
+ free_netdev(net);
+err_out:
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * The current irda-usb device is removed, the USB layer tells us
+ * to shut it down...
+ * One of the constraints is that when we exit this function,
+ * we cannot use the usb_device any more. Gone. Destroyed. kfree().
+ * Most other subsystems allow you to destroy the instance at a time
+ * that is convenient to you, or to postpone it to a later date, but
+ * not the USB subsystem.
+ * So, we must make bloody sure that everything gets deactivated.
+ * Jean II
+ */
+static void irda_usb_disconnect(struct usb_interface *intf)
+{
+ unsigned long flags;
+ struct irda_usb_cb *self = usb_get_intfdata(intf);
+ int i;
+
+ usb_set_intfdata(intf, NULL);
+ if (!self)
+ return;
+
+ /* Make sure that the Tx path is not executing. - Jean II */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Oops! We are not there any more.
+ * This will stop/deactivate the Tx path. - Jean II */
+ self->present = 0;
+
+ /* Kill deferred Rx URB */
+ del_timer(&self->rx_defer_timer);
+
+ /* We need to have irq enabled to unlink the URBs. That's OK,
+ * at this point the Tx path is gone - Jean II */
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ /* Hum... Check if networking is still active (avoid races) */
+ if((self->netopen) || (self->irlap)) {
+ /* Accept no more transmissions */
+ /*netif_device_detach(self->netdev);*/
+ netif_stop_queue(self->netdev);
+ /* Stop all the receive URBs. Must be synchronous. */
+ for (i = 0; i < self->max_rx_urb; i++)
+ usb_kill_urb(self->rx_urb[i]);
+ /* Cancel Tx and speed URB.
+ * Make sure it's synchronous to avoid races. */
+ usb_kill_urb(self->tx_urb);
+ usb_kill_urb(self->speed_urb);
+ }
+
+ /* Cleanup the device stuff */
+ irda_usb_close(self);
+ /* No longer attached to USB bus */
+ self->usbdev = NULL;
+ self->usbintf = NULL;
+
+ /* Clean up our urbs */
+ for (i = 0; i < self->max_rx_urb; i++)
+ usb_free_urb(self->rx_urb[i]);
+ kfree(self->rx_urb);
+ /* Clean up Tx and speed URB */
+ usb_free_urb(self->tx_urb);
+ usb_free_urb(self->speed_urb);
+
+ /* Free self and network device */
+ free_netdev(self->netdev);
+ pr_debug("%s(), USB IrDA Disconnected\n", __func__);
+}
+
+#ifdef CONFIG_PM
+/* USB suspend, so power off the transmitter/receiver */
+static int irda_usb_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct irda_usb_cb *self = usb_get_intfdata(intf);
+ int i;
+
+ netif_device_detach(self->netdev);
+
+ if (self->tx_urb != NULL)
+ usb_kill_urb(self->tx_urb);
+ if (self->speed_urb != NULL)
+ usb_kill_urb(self->speed_urb);
+ for (i = 0; i < self->max_rx_urb; i++) {
+ if (self->rx_urb[i] != NULL)
+ usb_kill_urb(self->rx_urb[i]);
+ }
+ return 0;
+}
+
+/* Coming out of suspend, so reset hardware */
+static int irda_usb_resume(struct usb_interface *intf)
+{
+ struct irda_usb_cb *self = usb_get_intfdata(intf);
+ int i;
+
+ for (i = 0; i < self->max_rx_urb; i++) {
+ if (self->rx_urb[i] != NULL)
+ usb_submit_urb(self->rx_urb[i], GFP_KERNEL);
+ }
+
+ netif_device_attach(self->netdev);
+ return 0;
+}
+#endif
+
+/*------------------------------------------------------------------*/
+/*
+ * USB device callbacks
+ */
+static struct usb_driver irda_driver = {
+ .name = "irda-usb",
+ .probe = irda_usb_probe,
+ .disconnect = irda_usb_disconnect,
+ .id_table = dongles,
+#ifdef CONFIG_PM
+ .suspend = irda_usb_suspend,
+ .resume = irda_usb_resume,
+#endif
+};
+
+module_usb_driver(irda_driver);
+
+/*
+ * Module parameters
+ */
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
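+/* Illustrative usage (module name assumed to be irda-usb) :
+ *	modprobe irda-usb qos_mtt_bits=0x03
+ * where 0x03 means "5 ms or more", the same value irtty-sir uses by default. */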
+MODULE_AUTHOR("Roman Weissgaerber <weissg@vienna.at>, Dag Brattli <dag@brattli.net>, Jean Tourrilhes <jt@hpl.hp.com> and Nick Fedchik <nick@fedchik.org.ua>");
+MODULE_DESCRIPTION("IrDA-USB Dongle Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/irda/drivers/irda-usb.h b/drivers/staging/irda/drivers/irda-usb.h
new file mode 100644
index 000000000000..8ac389fa9348
--- /dev/null
+++ b/drivers/staging/irda/drivers/irda-usb.h
@@ -0,0 +1,174 @@
+/*****************************************************************************
+ *
+ * Filename: irda-usb.h
+ * Version: 0.10
+ * Description: IrDA-USB Driver
+ * Status: Experimental
+ * Author: Dag Brattli <dag@brattli.net>
+ *
+ * Copyright (C) 2001, Roman Weissgaerber <weissg@vienna.at>
+ * Copyright (C) 2000, Dag Brattli <dag@brattli.net>
+ * Copyright (C) 2001, Jean Tourrilhes <jt@hpl.hp.com>
+ * Copyright (C) 2004, SigmaTel, Inc. <irquality@sigmatel.com>
+ * Copyright (C) 2005, Milan Beno <beno@pobox.sk>
+ * Copyright (C) 2006, Nick Fedchik <nick@fedchik.org.ua>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#include <linux/ktime.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h> /* struct irlap_cb */
+
+#define RX_COPY_THRESHOLD 200
+#define IRDA_USB_MAX_MTU 2051
+#define IRDA_USB_SPEED_MTU 64 /* Weird, but works like this */
+
+/* Maximum number of active URBs on the Rx path
+ * This is the number of buffers we keep between the USB hardware and the
+ * IrDA stack.
+ *
+ * Note : the network layer also queues the packets between us and the
+ * IrDA stack, and is actually pretty fast and efficient in doing that.
+ * Therefore, we don't need to have a large number of URBs, and we can
+ * perfectly live happy with only one. We certainly don't need to keep the
+ * full IrTTP window around here...
+ * I repeat for those who have trouble understanding : 1 URB is plenty
+ * good enough to handle back-to-back (brickwalled) frames. I tried it,
+ * it works (it's the hardware that has trouble doing it).
+ *
+ * Having 2 URBs would allow the USB stack to process one URB while we take
+ * care of the other and then swap the URBs...
+ * On the other hand, increasing the number of URBs will have penalties
+ * in terms of latency and will interact with the link management in IrLAP...
+ * Jean II */
+#define IU_MAX_ACTIVE_RX_URBS 1 /* Don't touch !!! */
+
+/* When a Rx URB is passed back to us, we can't reuse it immediately,
+ * because it may still be referenced by the USB layer. Therefore we
+ * need to keep one extra URB in the Rx path.
+ * Jean II */
+#define IU_MAX_RX_URBS (IU_MAX_ACTIVE_RX_URBS + 1)
+
+/* Various ugly stuff to try to work around generic problems */
+/* Send speed command in case of timeout, just for trying to get things sane */
+#define IU_BUG_KICK_TIMEOUT
+/* Show the USB class descriptor */
+#undef IU_DUMP_CLASS_DESC
+/* Assume a minimum round-trip latency for USB transfers (in us)...
+ * USB transfers are done in the next USB slot if there is no traffic
+ * (1/19 msec) and run at 12 Mb/s :
+ * Waiting for slot + tx = (53us + 16us) * 2 = 137us minimum.
+ * Rx notification will only be done at the end of the USB frame period :
+ * OHCI : frame period = 1ms
+ * UHCI : frame period = 1ms, but notification can take 2 or 3 ms :-(
+ * EHCI : frame period = 125us */
+#define IU_USB_MIN_RTT 500 /* This should be safe in most cases */
+
+/* Inbound header */
+#define MEDIA_BUSY 0x80
+
+#define SPEED_2400 0x01
+#define SPEED_9600 0x02
+#define SPEED_19200 0x03
+#define SPEED_38400 0x04
+#define SPEED_57600 0x05
+#define SPEED_115200 0x06
+#define SPEED_576000 0x07
+#define SPEED_1152000 0x08
+#define SPEED_4000000 0x09
+#define SPEED_16000000 0x0a
+
+/* Basic capabilities */
+#define IUC_DEFAULT 0x00 /* Basic device compliant with 1.0 spec */
+/* Main bugs */
+#define IUC_SPEED_BUG 0x01 /* Device doesn't set speed after the frame */
+#define IUC_NO_WINDOW 0x02 /* Device doesn't behave with big Rx window */
+#define IUC_NO_TURN 0x04 /* Device doesn't do turnaround by itself */
+/* Not currently used */
+#define IUC_SIR_ONLY 0x08 /* Device doesn't behave at FIR speeds */
+#define IUC_SMALL_PKT 0x10 /* Device doesn't behave with big Rx packets */
+#define IUC_MAX_WINDOW 0x20 /* Device underestimates the Rx window */
+#define IUC_MAX_XBOFS 0x40 /* Device needs more xbofs than advertised */
+#define IUC_STIR421X 0x80 /* SigmaTel 4210/4220/4116 VFIR */
+
+/* USB class definitions */
+#define USB_IRDA_HEADER 0x01
+#define USB_CLASS_IRDA 0x02 /* USB_CLASS_APP_SPEC subclass */
+#define USB_DT_IRDA 0x21
+#define USB_IRDA_STIR421X_HEADER 0x03
+#define IU_SIGMATEL_MAX_RX_URBS (IU_MAX_ACTIVE_RX_URBS + \
+ USB_IRDA_STIR421X_HEADER)
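+/* With the values above this works out to 4 Rx URBs (1 + 0x03) for
+ * STIR421x dongles. */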
+
+struct irda_class_desc {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __le16 bcdSpecRevision;
+ __u8 bmDataSize;
+ __u8 bmWindowSize;
+ __u8 bmMinTurnaroundTime;
+ __le16 wBaudRate;
+ __u8 bmAdditionalBOFs;
+ __u8 bIrdaRateSniff;
+ __u8 bMaxUnicastList;
+} __packed;
+
+/* class specific interface request to get the IrDA-USB class descriptor
+ * (6.2.5, USB-IrDA class spec 1.0) */
+
+#define IU_REQ_GET_CLASS_DESC 0x06
+#define STIR421X_MAX_PATCH_DOWNLOAD_SIZE 1023
+
+struct irda_usb_cb {
+ struct irda_class_desc *irda_desc;
+ struct usb_device *usbdev; /* init: probe_irda */
+ struct usb_interface *usbintf; /* init: probe_irda */
+ int netopen; /* Device is active for network */
+ int present; /* Device is present on the bus */
+ __u32 capability; /* Capability of the hardware */
+ __u8 bulk_in_ep; /* Rx Endpoint assignments */
+ __u8 bulk_out_ep; /* Tx Endpoint assignments */
+ __u16 bulk_out_mtu; /* Max Tx packet size in bytes */
+ __u8 bulk_int_ep; /* Interrupt Endpoint assignments */
+
+ __u8 max_rx_urb;
+ struct urb **rx_urb; /* URBs used to receive data frames */
+ struct urb *idle_rx_urb; /* Pointer to idle URB in Rx path */
+ struct urb *tx_urb; /* URB used to send data frames */
+ struct urb *speed_urb; /* URB used to send speed commands */
+
+ struct net_device *netdev; /* Yes! we are some kind of netdev. */
+ struct irlap_cb *irlap; /* The link layer we are bound to */
+ struct qos_info qos;
+ char *speed_buff; /* Buffer for speed changes */
+ char *tx_buff;
+
+ ktime_t stamp;
+
+ spinlock_t lock; /* For serializing Tx operations */
+
+ __u16 xbofs; /* Current xbofs setting */
+ __s16 new_xbofs; /* xbofs we need to set */
+ __u32 speed; /* Current speed */
+ __s32 new_speed; /* speed we need to set */
+
+ __u8 header_length; /* USB-IrDA frame header size */
+ int needspatch; /* device needs firmware patch */
+
+ struct timer_list rx_defer_timer; /* Wait for Rx error to clear */
+};
+
diff --git a/drivers/staging/irda/drivers/irtty-sir.c b/drivers/staging/irda/drivers/irtty-sir.c
new file mode 100644
index 000000000000..7a20a9a4663a
--- /dev/null
+++ b/drivers/staging/irda/drivers/irtty-sir.c
@@ -0,0 +1,570 @@
+/*********************************************************************
+ *
+ * Filename: irtty-sir.c
+ * Version: 2.0
+ * Description: IrDA line discipline implementation
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Dec 9 21:18:38 1997
+ * Modified at: Sun Oct 27 22:13:30 2002
+ * Modified by: Martin Diehl <mad@mdiehl.de>
+ * Sources: slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli,
+ * Copyright (c) 2002 Martin Diehl,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include "sir-dev.h"
+#include "irtty-sir.h"
+
+static int qos_mtt_bits = 0x03; /* 5 ms or more */
+
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
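+/* The IrDA line discipline is normally attached from userspace, e.g.
+ * (illustrative) with irattach(8) from irda-utils :
+ *	irattach /dev/ttyS0 -s
+ */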
+
+/* ------------------------------------------------------- */
+
+/* device configuration callbacks always invoked with irda-thread context */
+
+/* find out how many chars we have in the buffers below us
+ * this is allowed to lie, i.e. return fewer chars than we
+ * actually have. The returned value is used to determine
+ * how long the irdathread should wait before doing the
+ * real blocking wait_until_sent()
+ */
+
+static int irtty_chars_in_buffer(struct sir_dev *dev)
+{
+ struct sirtty_cb *priv = dev->priv;
+
+ IRDA_ASSERT(priv != NULL, return -1;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
+
+ return tty_chars_in_buffer(priv->tty);
+}
+
+/* Wait (sleep) until the underlying hardware has finished transmission
+ * i.e. hardware buffers are drained
+ * this must block and not return before all characters are really sent
+ *
+ * If the tty sits on top of a 16550A-like uart, there are typically
+ * up to 16 bytes in the fifo - f.e. 9600 bps 8N1 needs 16.7 msec
+ *
+ * With usbserial the uart-fifo is basically replaced by the converter's
+ * outgoing endpoint buffer, which can usually hold 64 bytes (at least).
+ * With pl2303 it appears we are safe with 60msec here.
+ *
+ * I really wish all serial drivers would provide
+ * correct implementation of wait_until_sent()
+ */
+
+#define USBSERIAL_TX_DONE_DELAY 60
+
+static void irtty_wait_until_sent(struct sir_dev *dev)
+{
+ struct sirtty_cb *priv = dev->priv;
+ struct tty_struct *tty;
+
+ IRDA_ASSERT(priv != NULL, return;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
+
+ tty = priv->tty;
+ if (tty->ops->wait_until_sent) {
+ tty->ops->wait_until_sent(tty, msecs_to_jiffies(100));
+ }
+ else {
+ msleep(USBSERIAL_TX_DONE_DELAY);
+ }
+}
+
+/*
+ * Function irtty_change_speed (dev, speed)
+ *
+ * Change the speed of the serial port.
+ *
+ * This may sleep in set_termios (e.g. in the usbserial driver) and must
+ * therefore not be called from interrupt/timer/tasklet context.
+ * All such invocations are deferred to kIrDAd now so we can sleep there.
+ */
+
+static int irtty_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ struct sirtty_cb *priv = dev->priv;
+ struct tty_struct *tty;
+ struct ktermios old_termios;
+ int cflag;
+
+ IRDA_ASSERT(priv != NULL, return -1;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
+
+ tty = priv->tty;
+
+ down_write(&tty->termios_rwsem);
+ old_termios = tty->termios;
+ cflag = tty->termios.c_cflag;
+ tty_encode_baud_rate(tty, speed, speed);
+ if (tty->ops->set_termios)
+ tty->ops->set_termios(tty, &old_termios);
+ priv->io.speed = speed;
+ up_write(&tty->termios_rwsem);
+
+ return 0;
+}
+
+/*
+ * Function irtty_set_dtr_rts (dev, dtr, rts)
+ *
+ * This function can be used by dongles etc. to set or reset the status
+ * of the dtr and rts lines
+ */
+
+static int irtty_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
+{
+ struct sirtty_cb *priv = dev->priv;
+ int set = 0;
+ int clear = 0;
+
+ IRDA_ASSERT(priv != NULL, return -1;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
+
+ if (rts)
+ set |= TIOCM_RTS;
+ else
+ clear |= TIOCM_RTS;
+ if (dtr)
+ set |= TIOCM_DTR;
+ else
+ clear |= TIOCM_DTR;
+
+ /*
+ * We can't use ioctl() because it expects a non-null file structure,
+ * and we don't have that here.
+ * This function is not yet defined for all tty drivers, so
+ * let's be careful... Jean II
+ */
+ IRDA_ASSERT(priv->tty->ops->tiocmset != NULL, return -1;);
+ priv->tty->ops->tiocmset(priv->tty, set, clear);
+
+ return 0;
+}
+
+/* ------------------------------------------------------- */
+
+/* called from sir_dev when there is more data to send
+ * context is either netdev->hard_xmit or some transmit-completion bh
+ * i.e. we are under spinlock here and must not sleep.
+ */
+
+static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t len)
+{
+ struct sirtty_cb *priv = dev->priv;
+ struct tty_struct *tty;
+ int writelen;
+
+ IRDA_ASSERT(priv != NULL, return -1;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
+
+ tty = priv->tty;
+ if (!tty->ops->write)
+ return 0;
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ writelen = tty_write_room(tty);
+ if (writelen > len)
+ writelen = len;
+ return tty->ops->write(tty, ptr, writelen);
+}
+
+/* ------------------------------------------------------- */
+
+/* irda line discipline callbacks */
+
+/*
+ * Function irtty_receive_buf( tty, cp, count)
+ *
+ * Handle the 'receiver data ready' interrupt. This function is called
+ * by the 'tty_io' module in the kernel when a block of IrDA data has
+ * been received, which can now be decapsulated and delivered for
+ * further processing
+ *
+ * calling context depends on underlying driver and tty->port->low_latency!
+ * for example (low_latency: 1 / 0):
+ * serial.c: uart-interrupt / softint
+ * usbserial: urb-complete-interrupt / softint
+ */
+
+static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ struct sir_dev *dev;
+ struct sirtty_cb *priv = tty->disc_data;
+ int i;
+
+ IRDA_ASSERT(priv != NULL, return;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
+
+ if (unlikely(count==0)) /* yes, this happens */
+ return;
+
+ dev = priv->dev;
+ if (!dev) {
+ net_warn_ratelimited("%s(), not ready yet!\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+ /*
+ * Characters received with a parity error, etc?
+ */
+ if (fp && *fp++) {
+ pr_debug("Framing or parity error!\n");
+ sirdev_receive(dev, NULL, 0); /* notify sir_dev (updating stats) */
+ return;
+ }
+ }
+
+ sirdev_receive(dev, cp, count);
+}
+
+/*
+ * Function irtty_write_wakeup (tty)
+ *
+ * Called by the driver when there's room for more data. If we have
+ * more packets to send, we send them here.
+ *
+ */
+static void irtty_write_wakeup(struct tty_struct *tty)
+{
+ struct sirtty_cb *priv = tty->disc_data;
+
+ IRDA_ASSERT(priv != NULL, return;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
+
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ if (priv->dev)
+ sirdev_write_complete(priv->dev);
+}
+
+/* ------------------------------------------------------- */
+
+/*
+ * Function irtty_stop_receiver (tty, stop)
+ *
+ */
+
+static inline void irtty_stop_receiver(struct tty_struct *tty, int stop)
+{
+ struct ktermios old_termios;
+ int cflag;
+
+ down_write(&tty->termios_rwsem);
+ old_termios = tty->termios;
+ cflag = tty->termios.c_cflag;
+
+ if (stop)
+ cflag &= ~CREAD;
+ else
+ cflag |= CREAD;
+
+ tty->termios.c_cflag = cflag;
+ if (tty->ops->set_termios)
+ tty->ops->set_termios(tty, &old_termios);
+ up_write(&tty->termios_rwsem);
+}
+
+/*****************************************************************/
+
+/* serialize ldisc open/close with sir_dev */
+static DEFINE_MUTEX(irtty_mutex);
+
+/* notifier from sir_dev when irda% device gets opened (ifup) */
+
+static int irtty_start_dev(struct sir_dev *dev)
+{
+ struct sirtty_cb *priv;
+ struct tty_struct *tty;
+
+ /* serialize with ldisc open/close */
+ mutex_lock(&irtty_mutex);
+
+ priv = dev->priv;
+ if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) {
+ mutex_unlock(&irtty_mutex);
+ return -ESTALE;
+ }
+
+ tty = priv->tty;
+
+ if (tty->ops->start)
+ tty->ops->start(tty);
+ /* Make sure we can receive more data */
+ irtty_stop_receiver(tty, FALSE);
+
+ mutex_unlock(&irtty_mutex);
+ return 0;
+}
+
+/* notifier from sir_dev when irda% device gets closed (ifdown) */
+
+static int irtty_stop_dev(struct sir_dev *dev)
+{
+ struct sirtty_cb *priv;
+ struct tty_struct *tty;
+
+ /* serialize with ldisc open/close */
+ mutex_lock(&irtty_mutex);
+
+ priv = dev->priv;
+ if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) {
+ mutex_unlock(&irtty_mutex);
+ return -ESTALE;
+ }
+
+ tty = priv->tty;
+
+ /* Make sure we don't receive more data */
+ irtty_stop_receiver(tty, TRUE);
+ if (tty->ops->stop)
+ tty->ops->stop(tty);
+
+ mutex_unlock(&irtty_mutex);
+
+ return 0;
+}
+
+/* ------------------------------------------------------- */
+
+static struct sir_driver sir_tty_drv = {
+ .owner = THIS_MODULE,
+ .driver_name = "sir_tty",
+ .start_dev = irtty_start_dev,
+ .stop_dev = irtty_stop_dev,
+ .do_write = irtty_do_write,
+ .chars_in_buffer = irtty_chars_in_buffer,
+ .wait_until_sent = irtty_wait_until_sent,
+ .set_speed = irtty_change_speed,
+ .set_dtr_rts = irtty_set_dtr_rts,
+};
+
+/* ------------------------------------------------------- */
+
+/*
+ * Function irtty_ioctl (tty, file, cmd, arg)
+ *
+ * The Swiss army knife of system calls :-)
+ *
+ */
+static int irtty_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct irtty_info { char name[6]; } info;
+ struct sir_dev *dev;
+ struct sirtty_cb *priv = tty->disc_data;
+ int err = 0;
+
+ IRDA_ASSERT(priv != NULL, return -ENODEV;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EBADR;);
+
+ pr_debug("%s(cmd=0x%X)\n", __func__, cmd);
+
+ dev = priv->dev;
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ switch (cmd) {
+ case IRTTY_IOCTDONGLE:
+ /* this call blocks for completion */
+ err = sirdev_set_dongle(dev, (IRDA_DONGLE) arg);
+ break;
+
+ case IRTTY_IOCGET:
+ IRDA_ASSERT(dev->netdev != NULL, return -1;);
+
+ memset(&info, 0, sizeof(info));
+ strncpy(info.name, dev->netdev->name, sizeof(info.name)-1);
+
+ if (copy_to_user((void __user *)arg, &info, sizeof(info)))
+ err = -EFAULT;
+ break;
+ default:
+ err = tty_mode_ioctl(tty, file, cmd, arg);
+ break;
+ }
+ return err;
+}
+
+
+/*
+ * Function irtty_open(tty)
+ *
+ * This function is called by the TTY module when the IrDA line
+ * discipline is called for. Because we are sure the tty line exists,
+ * we only have to link it to a free IrDA channel.
+ */
+static int irtty_open(struct tty_struct *tty)
+{
+ struct sir_dev *dev;
+ struct sirtty_cb *priv;
+ int ret = 0;
+
+ /* Module stuff handled via irda_ldisc.owner - Jean II */
+
+ /* stop the underlying driver */
+ irtty_stop_receiver(tty, TRUE);
+ if (tty->ops->stop)
+ tty->ops->stop(tty);
+
+ tty_driver_flush_buffer(tty);
+
+ /* apply mtt override */
+ sir_tty_drv.qos_mtt_bits = qos_mtt_bits;
+
+ /* get a sir device instance for this driver */
+ dev = sirdev_get_instance(&sir_tty_drv, tty->name);
+ if (!dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* allocate private device info block */
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
+ priv->magic = IRTTY_MAGIC;
+ priv->tty = tty;
+ priv->dev = dev;
+
+ /* serialize with start_dev - in case we were racing with ifup */
+ mutex_lock(&irtty_mutex);
+
+ dev->priv = priv;
+ tty->disc_data = priv;
+ tty->receive_room = 65536;
+
+ mutex_unlock(&irtty_mutex);
+
+ pr_debug("%s - %s: irda line discipline opened\n", __func__, tty->name);
+
+ return 0;
+
+out_put:
+ sirdev_put_instance(dev);
+out:
+ return ret;
+}
+
+/*
+ * Function irtty_close (tty)
+ *
+ * Close down an IrDA channel. This means flushing out any pending queues,
+ * and then restoring the TTY line discipline to what it was before it got
+ * hooked to IrDA (which usually is TTY again).
+ */
+static void irtty_close(struct tty_struct *tty)
+{
+ struct sirtty_cb *priv = tty->disc_data;
+
+ IRDA_ASSERT(priv != NULL, return;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
+
+	/* Hm, with a dongle attached the dongle driver wants
+	 * to close the dongle - which requires the use of
+	 * some tty write and/or termios or ioctl operations.
+	 * Are we allowed to call those when we have already been
+	 * asked to shut down the ldisc?
+	 * If not, we should somehow mark the dev as stale.
+	 * The question remains how to close the dongle in that case...
+	 * For now let's assume we may issue tty driver calls
+	 * until we return here from the ldisc close. I'm just wondering
+	 * how this behaves with hotpluggable serial hardware like
+	 * an rs232-pcmcia card or usb-serial...
+	 *
+	 * priv->tty = NULL?;
+	 */
+
+ /* we are dead now */
+ tty->disc_data = NULL;
+
+ sirdev_put_instance(priv->dev);
+
+ /* Stop tty */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ if (tty->ops->stop)
+ tty->ops->stop(tty);
+
+ kfree(priv);
+
+ pr_debug("%s - %s: irda line discipline closed\n", __func__, tty->name);
+}
+
+/* ------------------------------------------------------- */
+
+static struct tty_ldisc_ops irda_ldisc = {
+ .magic = TTY_LDISC_MAGIC,
+ .name = "irda",
+ .flags = 0,
+ .open = irtty_open,
+ .close = irtty_close,
+ .read = NULL,
+ .write = NULL,
+ .ioctl = irtty_ioctl,
+ .poll = NULL,
+ .receive_buf = irtty_receive_buf,
+ .write_wakeup = irtty_write_wakeup,
+ .owner = THIS_MODULE,
+};
+
+/* ------------------------------------------------------- */
+
+static int __init irtty_sir_init(void)
+{
+ int err;
+
+ if ((err = tty_register_ldisc(N_IRDA, &irda_ldisc)) != 0)
+ net_err_ratelimited("IrDA: can't register line discipline (err = %d)\n",
+ err);
+ return err;
+}
+
+static void __exit irtty_sir_cleanup(void)
+{
+ int err;
+
+ if ((err = tty_unregister_ldisc(N_IRDA))) {
+ net_err_ratelimited("%s(), can't unregister line discipline (err = %d)\n",
+ __func__, err);
+ }
+}
+
+module_init(irtty_sir_init);
+module_exit(irtty_sir_cleanup);
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("IrDA TTY device driver");
+MODULE_ALIAS_LDISC(N_IRDA);
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/staging/irda/drivers/irtty-sir.h b/drivers/staging/irda/drivers/irtty-sir.h
new file mode 100644
index 000000000000..b132d8f6eb13
--- /dev/null
+++ b/drivers/staging/irda/drivers/irtty-sir.h
@@ -0,0 +1,34 @@
+/*********************************************************************
+ *
+ * sir_tty.h: definitions for the irtty_sir client driver (formerly irtty)
+ *
+ * Copyright (c) 2002 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#ifndef IRTTYSIR_H
+#define IRTTYSIR_H
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h> // chipio_t
+
+#define IRTTY_IOC_MAGIC 'e'
+#define IRTTY_IOCTDONGLE _IO(IRTTY_IOC_MAGIC, 1)
+#define IRTTY_IOCGET _IOR(IRTTY_IOC_MAGIC, 2, struct irtty_info)
+#define IRTTY_IOC_MAXNR 2
+
+struct sirtty_cb {
+ magic_t magic;
+
+ struct sir_dev *dev;
+ struct tty_struct *tty;
+
+ chipio_t io; /* IrDA controller information */
+};
+
+#endif
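+
+/*
+ * Illustrative userspace sketch (not part of this driver): roughly how an
+ * irattach-style helper would bind the "irda" line discipline to a serial
+ * port and then query the resulting irda%d name via IRTTY_IOCGET. N_IRDA and
+ * TIOCSETD come from the standard UAPI/termios headers; the local irtty_info
+ * declaration only mirrors the 6-byte buffer that irtty_ioctl() copies out,
+ * and the helper name is made up for the example.
+ *
+ *	#include <fcntl.h>
+ *	#include <stdio.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/ioctl.h>
+ *	#include <unistd.h>
+ *
+ *	#define N_IRDA 11			// from <linux/tty.h>
+ *	struct irtty_info { char name[6]; };
+ *	#define IRTTY_IOCGET _IOR('e', 2, struct irtty_info)
+ *
+ *	int attach_irda(const char *port)
+ *	{
+ *		struct irtty_info info;
+ *		int ldisc = N_IRDA;
+ *		int fd = open(port, O_RDWR | O_NOCTTY);
+ *
+ *		if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0)
+ *			return -1;		// irtty_open() runs on success
+ *		if (ioctl(fd, IRTTY_IOCGET, &info) == 0)
+ *			printf("bound to %.6s\n", info.name);
+ *		return fd;	// keep fd open; closing it drops the ldisc
+ *	}
+ */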
diff --git a/drivers/staging/irda/drivers/kingsun-sir.c b/drivers/staging/irda/drivers/kingsun-sir.c
new file mode 100644
index 000000000000..4fd4ac2fe09f
--- /dev/null
+++ b/drivers/staging/irda/drivers/kingsun-sir.c
@@ -0,0 +1,634 @@
+/*****************************************************************************
+*
+* Filename: kingsun-sir.c
+* Version: 0.1.1
+* Description: IrDA KingSun/DonShine USB Dongle
+* Status: Experimental
+* Author: Alex Villacís Lasso <a_villacis@palosanto.com>
+*
+* Based on stir4200 and mcs7780 drivers, with (strange?) differences
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+*****************************************************************************/
+
+/*
+ * This is my current (2007-04-25) understanding of how this dongle is supposed
+ * to work. This is based on reverse-engineering and examination of the packet
+ * data sent and received by the WinXP driver using USBSnoopy. Feel free to
+ * update here as more of this dongle is known:
+ *
+ * General: Unlike the other USB IrDA dongles, this particular dongle exposes,
+ * not two bulk (in and out) endpoints, but two *interrupt* ones. This dongle,
+ * like the bulk based ones (stir4200.c and mcs7780.c), requires polling in
+ * order to receive data.
+ * Transmission: Just like stir4200, this dongle uses a raw stream of data,
+ * which needs to be wrapped and escaped in a similar way as in stir4200.c.
+ * Reception: Poll-based, as in stir4200. Each read returns the contents of an
+ * 8-byte buffer, of which the first byte (LSB) indicates the number of bytes
+ * (1-7) of valid data contained within the remaining 7 bytes. For example, if
+ * the buffer had the following contents:
+ * 06 ff ff ff c0 01 04 aa
+ * This means that (06) there are 6 bytes of valid data. The byte 0xaa at the
+ * end is garbage (left over from a previous reception) and is discarded.
+ * If a read returns an "impossible" value as the length of valid data (such as
+ * 0x36) in the first byte, then the buffer is uninitialized (as is the case of
+ * first plug-in) and its contents should be discarded. There is currently no
+ * evidence that the top 5 bits of the 1st byte of the buffer can have values
+ * other than 0 once reception begins.
+ * Once valid bytes are collected, the assembled stream is a sequence of
+ * wrapped IrDA frames that is unwrapped and unescaped as in stir4200.c.
+ * BIG FAT WARNING: the dongle does *not* reset the RX buffer in any way after
+ * a successful read from the host, which means that in absence of further
+ * reception, repeated reads from the dongle will return the exact same
+ * contents repeatedly. Attempts to be smart and cache a previous read seem
+ * to result in corrupted packets, so this driver depends on the unwrap logic
+ * to sort out any repeated reads.
+ * Speed change: no commands observed so far to change speed, assumed fixed
+ * 9600bps (SIR).
+ */
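+
+/*
+ * Minimal sketch of the RX report layout described above (illustration only;
+ * it is not referenced by the driver, which feeds the bytes straight into
+ * async_unwrap_char() in kingsun_rcv_irq() instead). Byte 0 of the 8-byte
+ * report carries the count of valid bytes (1-7); anything past that count is
+ * stale garbage from an earlier read. Returns the number of payload bytes
+ * copied out, or 0 for an uninitialized report.
+ */
+static inline int kingsun_sketch_parse_report(const unsigned char report[8],
+					      unsigned char payload[7])
+{
+	int count = report[0];
+	int i;
+
+	if (count < 1 || count > 7)	/* "impossible" length: discard */
+		return 0;
+	for (i = 0; i < count; i++)
+		payload[i] = report[i + 1];
+	return count;
+}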
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/device.h>
+#include <linux/crc32.h>
+
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+#include <linux/uaccess.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/crc.h>
+
+/*
+ * According to lsusb, 0x07c0 is assigned to
+ * "Code Mercenaries Hard- und Software GmbH"
+ */
+#define KING_VENDOR_ID 0x07c0
+#define KING_PRODUCT_ID 0x4200
+
+/* These are the currently known USB ids */
+static const struct usb_device_id dongles[] = {
+ /* KingSun Co,Ltd IrDA/USB Bridge */
+ { USB_DEVICE(KING_VENDOR_ID, KING_PRODUCT_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, dongles);
+
+#define KINGSUN_MTT 0x07
+
+#define KINGSUN_FIFO_SIZE 4096
+#define KINGSUN_EP_IN 0
+#define KINGSUN_EP_OUT 1
+
+struct kingsun_cb {
+ struct usb_device *usbdev; /* init: probe_irda */
+ struct net_device *netdev; /* network layer */
+	struct irlap_cb *irlap;		/* The link layer we are bound to */
+
+ struct qos_info qos;
+
+ __u8 *in_buf; /* receive buffer */
+ __u8 *out_buf; /* transmit buffer */
+ __u8 max_rx; /* max. atomic read from dongle
+ (usually 8), also size of in_buf */
+ __u8 max_tx; /* max. atomic write to dongle
+ (usually 8) */
+
+ iobuff_t rx_buff; /* receive unwrap state machine */
+ spinlock_t lock;
+ int receiving;
+
+ __u8 ep_in;
+ __u8 ep_out;
+
+ struct urb *tx_urb;
+ struct urb *rx_urb;
+};
+
+/* Callback transmission routine */
+static void kingsun_send_irq(struct urb *urb)
+{
+ struct kingsun_cb *kingsun = urb->context;
+ struct net_device *netdev = kingsun->netdev;
+
+ /* in process of stopping, just drop data */
+ if (!netif_running(kingsun->netdev)) {
+ dev_err(&kingsun->usbdev->dev,
+ "kingsun_send_irq: Network not running!\n");
+ return;
+ }
+
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "kingsun_send_irq: urb asynchronously failed - %d\n",
+ urb->status);
+ }
+ netif_wake_queue(netdev);
+}
+
+/*
+ * Called from net/core when new frame is available.
+ */
+static netdev_tx_t kingsun_hard_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct kingsun_cb *kingsun;
+ int wraplen;
+ int ret = 0;
+
+ netif_stop_queue(netdev);
+
+ /* the IRDA wrapping routines don't deal with non linear skb */
+ SKB_LINEAR_ASSERT(skb);
+
+ kingsun = netdev_priv(netdev);
+
+ spin_lock(&kingsun->lock);
+
+ /* Append data to the end of whatever data remains to be transmitted */
+ wraplen = async_wrap_skb(skb,
+ kingsun->out_buf,
+ KINGSUN_FIFO_SIZE);
+
+ /* Calculate how much data can be transmitted in this urb */
+ usb_fill_int_urb(kingsun->tx_urb, kingsun->usbdev,
+ usb_sndintpipe(kingsun->usbdev, kingsun->ep_out),
+ kingsun->out_buf, wraplen, kingsun_send_irq,
+ kingsun, 1);
+
+ if ((ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC))) {
+ dev_err(&kingsun->usbdev->dev,
+ "kingsun_hard_xmit: failed tx_urb submit: %d\n", ret);
+ switch (ret) {
+ case -ENODEV:
+ case -EPIPE:
+ break;
+ default:
+ netdev->stats.tx_errors++;
+ netif_start_queue(netdev);
+ }
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+ }
+
+ dev_kfree_skb(skb);
+ spin_unlock(&kingsun->lock);
+
+ return NETDEV_TX_OK;
+}
+
+/* Receive callback function */
+static void kingsun_rcv_irq(struct urb *urb)
+{
+ struct kingsun_cb *kingsun = urb->context;
+ int ret;
+
+ /* in process of stopping, just drop data */
+ if (!netif_running(kingsun->netdev)) {
+ kingsun->receiving = 0;
+ return;
+ }
+
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "kingsun_rcv_irq: urb asynchronously failed - %d\n",
+ urb->status);
+ kingsun->receiving = 0;
+ return;
+ }
+
+ if (urb->actual_length == kingsun->max_rx) {
+ __u8 *bytes = urb->transfer_buffer;
+ int i;
+
+ /* The very first byte in the buffer indicates the length of
+ valid data in the read. This byte must be in the range
+		   1 .. kingsun->max_rx - 1. Values outside this range indicate
+ an uninitialized Rx buffer when the dongle has just been
+ plugged in. */
+ if (bytes[0] >= 1 && bytes[0] < kingsun->max_rx) {
+ for (i = 1; i <= bytes[0]; i++) {
+ async_unwrap_char(kingsun->netdev,
+ &kingsun->netdev->stats,
+ &kingsun->rx_buff, bytes[i]);
+ }
+ kingsun->receiving =
+ (kingsun->rx_buff.state != OUTSIDE_FRAME)
+ ? 1 : 0;
+ }
+ } else if (urb->actual_length > 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "%s(): Unexpected response length, expected %d got %d\n",
+ __func__, kingsun->max_rx, urb->actual_length);
+ }
+ /* This urb has already been filled in kingsun_net_open */
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+/*
+ * Function kingsun_net_open (dev)
+ *
+ * Network device is taken up. Usually this is done by "ifconfig irda0 up"
+ */
+static int kingsun_net_open(struct net_device *netdev)
+{
+ struct kingsun_cb *kingsun = netdev_priv(netdev);
+ int err = -ENOMEM;
+ char hwname[16];
+
+ /* At this point, urbs are NULL, and skb is NULL (see kingsun_probe) */
+ kingsun->receiving = 0;
+
+ /* Initialize for SIR to copy data directly into skb. */
+ kingsun->rx_buff.in_frame = FALSE;
+ kingsun->rx_buff.state = OUTSIDE_FRAME;
+ kingsun->rx_buff.truesize = IRDA_SKB_MAX_MTU;
+ kingsun->rx_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
+ if (!kingsun->rx_buff.skb)
+ goto free_mem;
+
+ skb_reserve(kingsun->rx_buff.skb, 1);
+ kingsun->rx_buff.head = kingsun->rx_buff.skb->data;
+
+ kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!kingsun->rx_urb)
+ goto free_mem;
+
+ kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!kingsun->tx_urb)
+ goto free_mem;
+
+ /*
+ * Now that everything should be initialized properly,
+ * Open new IrLAP layer instance to take care of us...
+ */
+ sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
+ kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
+ if (!kingsun->irlap) {
+ dev_err(&kingsun->usbdev->dev, "irlap_open failed\n");
+ goto free_mem;
+ }
+
+ /* Start first reception */
+ usb_fill_int_urb(kingsun->rx_urb, kingsun->usbdev,
+ usb_rcvintpipe(kingsun->usbdev, kingsun->ep_in),
+ kingsun->in_buf, kingsun->max_rx,
+ kingsun_rcv_irq, kingsun, 1);
+ kingsun->rx_urb->status = 0;
+ err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
+ if (err) {
+ dev_err(&kingsun->usbdev->dev,
+ "first urb-submit failed: %d\n", err);
+ goto close_irlap;
+ }
+
+ netif_start_queue(netdev);
+
+ /* Situation at this point:
+ - all work buffers allocated
+ - urbs allocated and ready to fill
+ - max rx packet known (in max_rx)
+ - unwrap state machine initialized, in state outside of any frame
+ - receive request in progress
+ - IrLAP layer started, about to hand over packets to send
+ */
+
+ return 0;
+
+ close_irlap:
+ irlap_close(kingsun->irlap);
+ free_mem:
+ if (kingsun->tx_urb) {
+ usb_free_urb(kingsun->tx_urb);
+ kingsun->tx_urb = NULL;
+ }
+ if (kingsun->rx_urb) {
+ usb_free_urb(kingsun->rx_urb);
+ kingsun->rx_urb = NULL;
+ }
+ if (kingsun->rx_buff.skb) {
+ kfree_skb(kingsun->rx_buff.skb);
+ kingsun->rx_buff.skb = NULL;
+ kingsun->rx_buff.head = NULL;
+ }
+ return err;
+}
+
+/*
+ * Function kingsun_net_close (kingsun)
+ *
+ * Network device is taken down. Usually this is done by
+ * "ifconfig irda0 down"
+ */
+static int kingsun_net_close(struct net_device *netdev)
+{
+ struct kingsun_cb *kingsun = netdev_priv(netdev);
+
+ /* Stop transmit processing */
+ netif_stop_queue(netdev);
+
+ /* Mop up receive && transmit urb's */
+ usb_kill_urb(kingsun->tx_urb);
+ usb_kill_urb(kingsun->rx_urb);
+
+ usb_free_urb(kingsun->tx_urb);
+ usb_free_urb(kingsun->rx_urb);
+
+ kingsun->tx_urb = NULL;
+ kingsun->rx_urb = NULL;
+
+ kfree_skb(kingsun->rx_buff.skb);
+ kingsun->rx_buff.skb = NULL;
+ kingsun->rx_buff.head = NULL;
+ kingsun->rx_buff.in_frame = FALSE;
+ kingsun->rx_buff.state = OUTSIDE_FRAME;
+ kingsun->receiving = 0;
+
+ /* Stop and remove instance of IrLAP */
+ if (kingsun->irlap)
+ irlap_close(kingsun->irlap);
+
+ kingsun->irlap = NULL;
+
+ return 0;
+}
+
+/*
+ * IOCTLs : Extra out-of-band network commands...
+ */
+static int kingsun_net_ioctl(struct net_device *netdev, struct ifreq *rq,
+ int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct kingsun_cb *kingsun = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the device is still there */
+ if (netif_device_present(kingsun->netdev))
+ /* No observed commands for speed change */
+ ret = -EOPNOTSUPP;
+ break;
+
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the IrDA stack is still there */
+ if (netif_running(kingsun->netdev))
+ irda_device_set_media_busy(kingsun->netdev, TRUE);
+ break;
+
+ case SIOCGRECEIVING:
+ /* Only approximately true */
+ irq->ifr_receiving = kingsun->receiving;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static const struct net_device_ops kingsun_ops = {
+ .ndo_start_xmit = kingsun_hard_xmit,
+ .ndo_open = kingsun_net_open,
+ .ndo_stop = kingsun_net_close,
+ .ndo_do_ioctl = kingsun_net_ioctl,
+};
+
+/*
+ * This routine is called by the USB subsystem for each new device
+ * in the system. We need to check if the device is ours, and in
+ * this case start handling it.
+ */
+static int kingsun_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_host_interface *interface;
+ struct usb_endpoint_descriptor *endpoint;
+
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct kingsun_cb *kingsun = NULL;
+ struct net_device *net = NULL;
+ int ret = -ENOMEM;
+ int pipe, maxp_in, maxp_out;
+ __u8 ep_in;
+ __u8 ep_out;
+
+ /* Check that there really are two interrupt endpoints.
+ Check based on the one in drivers/usb/input/usbmouse.c
+ */
+ interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints != 2) {
+ dev_err(&intf->dev,
+ "kingsun-sir: expected 2 endpoints, found %d\n",
+ interface->desc.bNumEndpoints);
+ return -ENODEV;
+ }
+ endpoint = &interface->endpoint[KINGSUN_EP_IN].desc;
+ if (!usb_endpoint_is_int_in(endpoint)) {
+ dev_err(&intf->dev,
+ "kingsun-sir: endpoint 0 is not interrupt IN\n");
+ return -ENODEV;
+ }
+
+ ep_in = endpoint->bEndpointAddress;
+ pipe = usb_rcvintpipe(dev, ep_in);
+ maxp_in = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+ if (maxp_in > 255 || maxp_in <= 1) {
+ dev_err(&intf->dev,
+ "endpoint 0 has max packet size %d not in range\n",
+ maxp_in);
+ return -ENODEV;
+ }
+
+ endpoint = &interface->endpoint[KINGSUN_EP_OUT].desc;
+ if (!usb_endpoint_is_int_out(endpoint)) {
+ dev_err(&intf->dev,
+ "kingsun-sir: endpoint 1 is not interrupt OUT\n");
+ return -ENODEV;
+ }
+
+ ep_out = endpoint->bEndpointAddress;
+ pipe = usb_sndintpipe(dev, ep_out);
+ maxp_out = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+
+ /* Allocate network device container. */
+ net = alloc_irdadev(sizeof(*kingsun));
+	if (!net)
+ goto err_out1;
+
+ SET_NETDEV_DEV(net, &intf->dev);
+ kingsun = netdev_priv(net);
+ kingsun->irlap = NULL;
+ kingsun->tx_urb = NULL;
+ kingsun->rx_urb = NULL;
+ kingsun->ep_in = ep_in;
+ kingsun->ep_out = ep_out;
+ kingsun->in_buf = NULL;
+ kingsun->out_buf = NULL;
+ kingsun->max_rx = (__u8)maxp_in;
+ kingsun->max_tx = (__u8)maxp_out;
+ kingsun->netdev = net;
+ kingsun->usbdev = dev;
+ kingsun->rx_buff.in_frame = FALSE;
+ kingsun->rx_buff.state = OUTSIDE_FRAME;
+ kingsun->rx_buff.skb = NULL;
+ kingsun->receiving = 0;
+ spin_lock_init(&kingsun->lock);
+
+ /* Allocate input buffer */
+ kingsun->in_buf = kmalloc(kingsun->max_rx, GFP_KERNEL);
+ if (!kingsun->in_buf)
+ goto free_mem;
+
+ /* Allocate output buffer */
+ kingsun->out_buf = kmalloc(KINGSUN_FIFO_SIZE, GFP_KERNEL);
+ if (!kingsun->out_buf)
+ goto free_mem;
+
+ printk(KERN_INFO "KingSun/DonShine IRDA/USB found at address %d, "
+ "Vendor: %x, Product: %x\n",
+ dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&kingsun->qos);
+
+ /* That's the Rx capability. */
+ kingsun->qos.baud_rate.bits &= IR_9600;
+ kingsun->qos.min_turn_time.bits &= KINGSUN_MTT;
+ irda_qos_bits_to_value(&kingsun->qos);
+
+ /* Override the network functions we need to use */
+ net->netdev_ops = &kingsun_ops;
+
+ ret = register_netdev(net);
+ if (ret != 0)
+ goto free_mem;
+
+ dev_info(&net->dev, "IrDA: Registered KingSun/DonShine device %s\n",
+ net->name);
+
+ usb_set_intfdata(intf, kingsun);
+
+ /* Situation at this point:
+ - all work buffers allocated
+ - urbs not allocated, set to NULL
+ - max rx packet known (in max_rx)
+ - unwrap state machine (partially) initialized, but skb == NULL
+ */
+
+ return 0;
+
+free_mem:
+ kfree(kingsun->out_buf);
+ kfree(kingsun->in_buf);
+ free_netdev(net);
+err_out1:
+ return ret;
+}
+
+/*
+ * The current device is removed, the USB layer tells us to shut it down...
+ */
+static void kingsun_disconnect(struct usb_interface *intf)
+{
+ struct kingsun_cb *kingsun = usb_get_intfdata(intf);
+
+ if (!kingsun)
+ return;
+
+ unregister_netdev(kingsun->netdev);
+
+ /* Mop up receive && transmit urb's */
+ if (kingsun->tx_urb != NULL) {
+ usb_kill_urb(kingsun->tx_urb);
+ usb_free_urb(kingsun->tx_urb);
+ kingsun->tx_urb = NULL;
+ }
+ if (kingsun->rx_urb != NULL) {
+ usb_kill_urb(kingsun->rx_urb);
+ usb_free_urb(kingsun->rx_urb);
+ kingsun->rx_urb = NULL;
+ }
+
+ kfree(kingsun->out_buf);
+ kfree(kingsun->in_buf);
+ free_netdev(kingsun->netdev);
+
+ usb_set_intfdata(intf, NULL);
+}
+
+#ifdef CONFIG_PM
+/* USB suspend, so power off the transmitter/receiver */
+static int kingsun_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct kingsun_cb *kingsun = usb_get_intfdata(intf);
+
+ netif_device_detach(kingsun->netdev);
+	if (kingsun->tx_urb != NULL)
+		usb_kill_urb(kingsun->tx_urb);
+	if (kingsun->rx_urb != NULL)
+		usb_kill_urb(kingsun->rx_urb);
+ return 0;
+}
+
+/* Coming out of suspend, so reset hardware */
+static int kingsun_resume(struct usb_interface *intf)
+{
+ struct kingsun_cb *kingsun = usb_get_intfdata(intf);
+
+ if (kingsun->rx_urb != NULL)
+ usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
+ netif_device_attach(kingsun->netdev);
+
+ return 0;
+}
+#endif
+
+/*
+ * USB device callbacks
+ */
+static struct usb_driver irda_driver = {
+ .name = "kingsun-sir",
+ .probe = kingsun_probe,
+ .disconnect = kingsun_disconnect,
+ .id_table = dongles,
+#ifdef CONFIG_PM
+ .suspend = kingsun_suspend,
+ .resume = kingsun_resume,
+#endif
+};
+
+module_usb_driver(irda_driver);
+
+MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
+MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun/DonShine");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/irda/drivers/ks959-sir.c b/drivers/staging/irda/drivers/ks959-sir.c
new file mode 100644
index 000000000000..8025741e7586
--- /dev/null
+++ b/drivers/staging/irda/drivers/ks959-sir.c
@@ -0,0 +1,912 @@
+/*****************************************************************************
+*
+* Filename: ks959-sir.c
+* Version: 0.1.2
+* Description: IrDA KingSun KS-959 USB Dongle
+* Status: Experimental
+* Author: Alex Villacís Lasso <a_villacis@palosanto.com>
+* with help from Domen Puncer <domen@coderock.org>
+*
+* Based on stir4200, mcs7780, kingsun-sir drivers.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+*****************************************************************************/
+
+/*
+ * Following is my most current (2007-07-17) understanding of how the Kingsun
+ * KS-959 dongle is supposed to work. This information was deduced by
+ * reverse-engineering and examining the USB traffic captured with USBSnoopy
+ * from the WinXP driver. Feel free to update here as more of the dongle is
+ * known.
+ *
+ * My most sincere thanks must go to Domen Puncer <domen@coderock.org> for
+ * invaluable help in cracking the obfuscation and padding required for this
+ * dongle.
+ *
+ * General: This dongle exposes one interface with one interrupt IN endpoint.
+ * However, the interrupt endpoint is NOT used at all for this dongle. Instead,
+ * this dongle uses control transfers for everything, including sending and
+ * receiving the IrDA frame data. Apparently the interrupt endpoint is just a
+ * dummy to ensure the dongle has a valid interface to present to the PC. And I
+ * thought the DonShine dongle was weird... In addition, this dongle uses
+ * obfuscation (?!?!), applied at the USB level, to hide the traffic both sent
+ * to and received from the dongle. I call it obfuscation because the XOR
+ * keying and padding required to produce USB traffic acceptable to the dongle
+ * cannot be explained by any other technical requirement.
+ *
+ * Transmission: To transmit an IrDA frame, the driver must prepare a control
+ * URB with the following as a setup packet:
+ * bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
+ * bRequest 0x09
+ * wValue <length of valid data before padding, little endian>
+ * wIndex 0x0000
+ * wLength <length of padded data>
+ * The payload packet must be manually wrapped and escaped (as in stir4200.c),
+ * then padded and obfuscated before being sent. Both padding and obfuscation
+ * are implemented in the procedure obfuscate_tx_buffer(). Suffice to say, the
+ * designer/programmer of the dongle used his name as a source for the
+ * obfuscation. WTF?!
+ * Apparently the dongle cannot handle payloads larger than 256 bytes. The
+ * driver has to perform fragmentation in order to send anything larger than
+ * this limit.
+ *
+ * Reception: To receive data, the driver must poll the dongle regularly (like
+ * kingsun-sir.c) with control URBs and the following as a setup packet:
+ * bRequestType USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE
+ * bRequest 0x01
+ * wValue 0x0200
+ * wIndex 0x0000
+ * wLength 0x0800 (size of available buffer)
+ * If there is data to be read, it will be returned as the response payload.
+ * This data is (apparently) not padded, but it is obfuscated. To de-obfuscate
+ * it, the driver must XOR every byte, in sequence, with a value that starts at
+ * 1 and is incremented with each byte processed, and then with 0x55. The value
+ * incremented with each byte processed overflows as an unsigned char. The
+ * resulting bytes form a wrapped SIR frame that is unwrapped and unescaped
+ * as in stir4200.c. The incremented value is NOT reset with each frame, but is
+ * kept across the entire session with the dongle. Also, the dongle inserts an
+ * extra garbage byte with value 0x95 (after decoding) every 0xff bytes, which
+ * must be skipped.
+ *
+ * Speed change: To change the speed of the dongle, the driver prepares a
+ * control URB with the following as a setup packet:
+ * bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
+ * bRequest 0x09
+ * wValue 0x0200
+ * wIndex 0x0001
+ * wLength 0x0008 (length of the payload)
+ * The payload is an 8-byte record, apparently identical to the one used in
+ * drivers/usb/serial/cypress_m8.c to change speed:
+ * __u32 baudSpeed;
+ * unsigned int dataBits : 2; // 0 - 5 bits 3 - 8 bits
+ * unsigned int : 1;
+ * unsigned int stopBits : 1;
+ * unsigned int parityEnable : 1;
+ * unsigned int parityType : 1;
+ * unsigned int : 1;
+ * unsigned int reset : 1;
+ * unsigned char reserved[3]; // set to 0
+ *
+ * For now only SIR speeds have been observed with this dongle. Therefore,
+ * nothing is known on what changes (if any) must be done to frame wrapping /
+ * unwrapping for higher than SIR speeds. This driver assumes no change is
+ * necessary and announces support for all the way to 57600 bps. Although the
+ * package announces support for up to 4MBps, tests with a Sony Ericsson K300
+ * phone show corruption when receiving large frames at 115200 bps, the highest
+ * speed announced by the phone. However, transmission at 115200 bps is OK. Go
+ * figure. Since I don't know whether the phone or the dongle is at fault, max
+ * announced speed is 57600 bps until someone produces a device that can run
+ * at higher speeds with this dongle.
+ */
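+
+/*
+ * Minimal sketch of the RX de-obfuscation rule described above (illustration
+ * only; the driver does this inline in ks959_rcv_irq()). The single xormask
+ * counter lives for the whole session: it is bumped before every byte, the
+ * byte is XORed with the counter and 0x55, and the byte that lands on a
+ * wrapped (zero) counter is the periodic garbage byte and is dropped.
+ * Returns the number of clear bytes produced in out[].
+ */
+static inline unsigned int ks959_sketch_deobfuscate(unsigned char *xormask,
+						    const unsigned char *in,
+						    unsigned int len,
+						    unsigned char *out)
+{
+	unsigned int i, n = 0;
+
+	for (i = 0; i < len; i++) {
+		(*xormask)++;
+		if (*xormask != 0)
+			out[n++] = in[i] ^ *xormask ^ 0x55;
+	}
+	return n;
+}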
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/device.h>
+#include <linux/crc32.h>
+
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+#include <linux/uaccess.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/crc.h>
+
+#define KS959_VENDOR_ID 0x07d0
+#define KS959_PRODUCT_ID 0x4959
+
+/* These are the currently known USB ids */
+static const struct usb_device_id dongles[] = {
+ /* KingSun Co,Ltd IrDA/USB Bridge */
+ {USB_DEVICE(KS959_VENDOR_ID, KS959_PRODUCT_ID)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(usb, dongles);
+
+#define KINGSUN_MTT 0x07
+#define KINGSUN_REQ_RECV 0x01
+#define KINGSUN_REQ_SEND 0x09
+
+#define KINGSUN_RCV_FIFO_SIZE 2048 /* Max length we can receive */
+#define KINGSUN_SND_FIFO_SIZE 2048 /* Max packet we can send */
+#define KINGSUN_SND_PACKET_SIZE 256 /* Max packet dongle can handle */
+
+struct ks959_speedparams {
+ __le32 baudrate; /* baud rate, little endian */
+ __u8 flags;
+ __u8 reserved[3];
+} __packed;
+
+#define KS_DATA_5_BITS 0x00
+#define KS_DATA_6_BITS 0x01
+#define KS_DATA_7_BITS 0x02
+#define KS_DATA_8_BITS 0x03
+
+#define KS_STOP_BITS_1 0x00
+#define KS_STOP_BITS_2 0x08
+
+#define KS_PAR_DISABLE 0x00
+#define KS_PAR_EVEN 0x10
+#define KS_PAR_ODD 0x30
+#define KS_RESET 0x80
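+
+/*
+ * Example (illustrative): a 9600 bps, 8N1, no-parity request is sent as the
+ * 8-byte record 80 25 00 00 03 00 00 00, i.e. cpu_to_le32(9600) followed by
+ * flags = KS_DATA_8_BITS and three reserved zero bytes (see
+ * ks959_change_speed() below).
+ */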
+
+struct ks959_cb {
+ struct usb_device *usbdev; /* init: probe_irda */
+ struct net_device *netdev; /* network layer */
+	struct irlap_cb *irlap;	/* The link layer we are bound to */
+
+ struct qos_info qos;
+
+ struct usb_ctrlrequest *tx_setuprequest;
+ struct urb *tx_urb;
+ __u8 *tx_buf_clear;
+ unsigned int tx_buf_clear_used;
+ unsigned int tx_buf_clear_sent;
+ __u8 *tx_buf_xored;
+
+ struct usb_ctrlrequest *rx_setuprequest;
+ struct urb *rx_urb;
+ __u8 *rx_buf;
+ __u8 rx_variable_xormask;
+ iobuff_t rx_unwrap_buff;
+
+ struct usb_ctrlrequest *speed_setuprequest;
+ struct urb *speed_urb;
+ struct ks959_speedparams speedparams;
+ unsigned int new_speed;
+
+ spinlock_t lock;
+ int receiving;
+};
+
+/* Procedure to perform the obfuscation/padding expected by the dongle
+ *
+ * buf_cleartext (IN) Cleartext version of the IrDA frame to transmit
+ * len_cleartext (IN) Length of the cleartext version of IrDA frame
+ * buf_xoredtext (OUT) Obfuscated version of frame built by proc
+ * len_maxbuf (IN) Maximum space available at buf_xoredtext
+ *
+ * (return) length of obfuscated frame with padding
+ *
+ * If not enough space (as indicated by len_maxbuf vs. required padding),
+ * zero is returned
+ *
+ * The value of lookup_string is actually a required portion of the algorithm.
+ * Seems the designer of the dongle wanted to state who exactly is responsible
+ * for implementing obfuscation. Send your best (or other) wishes to him ]:-)
+ */
+static unsigned int obfuscate_tx_buffer(const __u8 *buf_cleartext,
+					unsigned int len_cleartext,
+					__u8 *buf_xoredtext,
+					unsigned int len_maxbuf)
+{
+ unsigned int len_xoredtext;
+
+ /* Calculate required length with padding, check for necessary space */
+ len_xoredtext = ((len_cleartext + 7) & ~0x7) + 0x10;
+ if (len_xoredtext <= len_maxbuf) {
+ static const __u8 lookup_string[] = "wangshuofei19710";
+ __u8 xor_mask;
+
+ /* Unlike the WinXP driver, we *do* clear out the padding */
+ memset(buf_xoredtext, 0, len_xoredtext);
+
+ xor_mask = lookup_string[(len_cleartext & 0x0f) ^ 0x06] ^ 0x55;
+
+ while (len_cleartext-- > 0) {
+ *buf_xoredtext++ = *buf_cleartext++ ^ xor_mask;
+ }
+ } else {
+ len_xoredtext = 0;
+ }
+ return len_xoredtext;
+}
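+
+/*
+ * Worked example of the padding and keying above (illustrative numbers): for
+ * len_cleartext = 60 the padded length is ((60 + 7) & ~0x7) + 0x10 = 80, and
+ * the key is lookup_string[(60 & 0x0f) ^ 0x06] ^ 0x55 = 'i' ^ 0x55 = 0x3c,
+ * which is XORed over all 60 cleartext bytes before the zero padding.
+ */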
+
+/* Callback transmission routine */
+static void ks959_speed_irq(struct urb *urb)
+{
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0) {
+ dev_err(&urb->dev->dev,
+ "ks959_speed_irq: urb asynchronously failed - %d\n",
+ urb->status);
+ }
+}
+
+/* Send a control request to change speed of the dongle */
+static int ks959_change_speed(struct ks959_cb *kingsun, unsigned speed)
+{
+ static unsigned int supported_speeds[] = { 2400, 9600, 19200, 38400,
+ 57600, 115200, 576000, 1152000, 4000000, 0
+ };
+ int err;
+ unsigned int i;
+
+ if (kingsun->speed_setuprequest == NULL || kingsun->speed_urb == NULL)
+ return -ENOMEM;
+
+ /* Check that requested speed is among the supported ones */
+ for (i = 0; supported_speeds[i] && supported_speeds[i] != speed; i++) ;
+ if (supported_speeds[i] == 0)
+ return -EOPNOTSUPP;
+
+ memset(&(kingsun->speedparams), 0, sizeof(struct ks959_speedparams));
+ kingsun->speedparams.baudrate = cpu_to_le32(speed);
+ kingsun->speedparams.flags = KS_DATA_8_BITS;
+
+ /* speed_setuprequest pre-filled in ks959_probe */
+ usb_fill_control_urb(kingsun->speed_urb, kingsun->usbdev,
+ usb_sndctrlpipe(kingsun->usbdev, 0),
+ (unsigned char *)kingsun->speed_setuprequest,
+ &(kingsun->speedparams),
+ sizeof(struct ks959_speedparams), ks959_speed_irq,
+ kingsun);
+ kingsun->speed_urb->status = 0;
+ err = usb_submit_urb(kingsun->speed_urb, GFP_ATOMIC);
+
+ return err;
+}
+
+/* Submit one fragment of an IrDA frame to the dongle */
+static void ks959_send_irq(struct urb *urb);
+static int ks959_submit_tx_fragment(struct ks959_cb *kingsun)
+{
+ unsigned int padlen;
+ unsigned int wraplen;
+ int ret;
+
+ /* Check whether current plaintext can produce a padded buffer that fits
+ within the range handled by the dongle */
+ wraplen = (KINGSUN_SND_PACKET_SIZE & ~0x7) - 0x10;
+ if (wraplen > kingsun->tx_buf_clear_used)
+ wraplen = kingsun->tx_buf_clear_used;
+
+	/* Perform dongle obfuscation. The portion of the frame obfuscated here
+	   is removed from tx_buf_clear by ks959_send_irq() once it has been
+	   sent to the dongle. */
+ padlen = obfuscate_tx_buffer(kingsun->tx_buf_clear, wraplen,
+ kingsun->tx_buf_xored,
+ KINGSUN_SND_PACKET_SIZE);
+
+ /* Calculate how much data can be transmitted in this urb */
+ kingsun->tx_setuprequest->wValue = cpu_to_le16(wraplen);
+ kingsun->tx_setuprequest->wLength = cpu_to_le16(padlen);
+ /* Rest of the fields were filled in ks959_probe */
+ usb_fill_control_urb(kingsun->tx_urb, kingsun->usbdev,
+ usb_sndctrlpipe(kingsun->usbdev, 0),
+ (unsigned char *)kingsun->tx_setuprequest,
+ kingsun->tx_buf_xored, padlen,
+ ks959_send_irq, kingsun);
+ kingsun->tx_urb->status = 0;
+ ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC);
+
+ /* Remember how much data was sent, in order to update at callback */
+ kingsun->tx_buf_clear_sent = (ret == 0) ? wraplen : 0;
+ return ret;
+}
+
+/* Callback transmission routine */
+static void ks959_send_irq(struct urb *urb)
+{
+ struct ks959_cb *kingsun = urb->context;
+ struct net_device *netdev = kingsun->netdev;
+ int ret = 0;
+
+ /* in process of stopping, just drop data */
+ if (!netif_running(kingsun->netdev)) {
+ dev_err(&kingsun->usbdev->dev,
+ "ks959_send_irq: Network not running!\n");
+ return;
+ }
+
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "ks959_send_irq: urb asynchronously failed - %d\n",
+ urb->status);
+ return;
+ }
+
+ if (kingsun->tx_buf_clear_used > 0) {
+ /* Update data remaining to be sent */
+ if (kingsun->tx_buf_clear_sent < kingsun->tx_buf_clear_used) {
+ memmove(kingsun->tx_buf_clear,
+ kingsun->tx_buf_clear +
+ kingsun->tx_buf_clear_sent,
+ kingsun->tx_buf_clear_used -
+ kingsun->tx_buf_clear_sent);
+ }
+ kingsun->tx_buf_clear_used -= kingsun->tx_buf_clear_sent;
+ kingsun->tx_buf_clear_sent = 0;
+
+ if (kingsun->tx_buf_clear_used > 0) {
+ /* There is more data to be sent */
+ if ((ret = ks959_submit_tx_fragment(kingsun)) != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "ks959_send_irq: failed tx_urb submit: %d\n",
+ ret);
+ switch (ret) {
+ case -ENODEV:
+ case -EPIPE:
+ break;
+ default:
+ netdev->stats.tx_errors++;
+ netif_start_queue(netdev);
+ }
+ }
+ } else {
+ /* All data sent, send next speed && wake network queue */
+ if (kingsun->new_speed != -1 &&
+ cpu_to_le32(kingsun->new_speed) !=
+ kingsun->speedparams.baudrate)
+ ks959_change_speed(kingsun, kingsun->new_speed);
+
+ netif_wake_queue(netdev);
+ }
+ }
+}
+
+/*
+ * Called from net/core when new frame is available.
+ */
+static netdev_tx_t ks959_hard_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct ks959_cb *kingsun;
+ unsigned int wraplen;
+ int ret = 0;
+
+ netif_stop_queue(netdev);
+
+ /* the IRDA wrapping routines don't deal with non linear skb */
+ SKB_LINEAR_ASSERT(skb);
+
+ kingsun = netdev_priv(netdev);
+
+ spin_lock(&kingsun->lock);
+ kingsun->new_speed = irda_get_next_speed(skb);
+
+ /* Append data to the end of whatever data remains to be transmitted */
+ wraplen =
+ async_wrap_skb(skb, kingsun->tx_buf_clear, KINGSUN_SND_FIFO_SIZE);
+ kingsun->tx_buf_clear_used = wraplen;
+
+ if ((ret = ks959_submit_tx_fragment(kingsun)) != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "ks959_hard_xmit: failed tx_urb submit: %d\n", ret);
+ switch (ret) {
+ case -ENODEV:
+ case -EPIPE:
+ break;
+ default:
+ netdev->stats.tx_errors++;
+ netif_start_queue(netdev);
+ }
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+
+ }
+
+ dev_kfree_skb(skb);
+ spin_unlock(&kingsun->lock);
+
+ return NETDEV_TX_OK;
+}
+
+/* Receive callback function */
+static void ks959_rcv_irq(struct urb *urb)
+{
+ struct ks959_cb *kingsun = urb->context;
+ int ret;
+
+ /* in process of stopping, just drop data */
+ if (!netif_running(kingsun->netdev)) {
+ kingsun->receiving = 0;
+ return;
+ }
+
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0) {
+ dev_err(&kingsun->usbdev->dev,
+			"ks959_rcv_irq: urb asynchronously failed - %d\n",
+ urb->status);
+ kingsun->receiving = 0;
+ return;
+ }
+
+ if (urb->actual_length > 0) {
+ __u8 *bytes = urb->transfer_buffer;
+ unsigned int i;
+
+ for (i = 0; i < urb->actual_length; i++) {
+ /* De-obfuscation implemented here: variable portion of
+ xormask is incremented, and then used with the encoded
+ byte for the XOR. The result of the operation is used
+ to unwrap the SIR frame. */
+ kingsun->rx_variable_xormask++;
+ bytes[i] =
+ bytes[i] ^ kingsun->rx_variable_xormask ^ 0x55u;
+
+ /* rx_variable_xormask doubles as an index counter so we
+ can skip the byte at 0xff (wrapped around to 0).
+ */
+ if (kingsun->rx_variable_xormask != 0) {
+ async_unwrap_char(kingsun->netdev,
+ &kingsun->netdev->stats,
+ &kingsun->rx_unwrap_buff,
+ bytes[i]);
+ }
+ }
+ kingsun->receiving =
+ (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0;
+ }
+
+ /* This urb has already been filled in kingsun_net_open. Setup
+ packet must be re-filled, but it is assumed that urb keeps the
+ pointer to the initial setup packet, as well as the payload buffer.
+ Setup packet is already pre-filled at ks959_probe.
+ */
+ urb->status = 0;
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+/*
+ * Function ks959_net_open (netdev)
+ *
+ * Network device is taken up. Usually this is done by "ifconfig irda0 up"
+ */
+static int ks959_net_open(struct net_device *netdev)
+{
+ struct ks959_cb *kingsun = netdev_priv(netdev);
+ int err = -ENOMEM;
+ char hwname[16];
+
+	/* At this point, urbs are NULL, and skb is NULL (see ks959_probe) */
+ kingsun->receiving = 0;
+
+ /* Initialize for SIR to copy data directly into skb. */
+ kingsun->rx_unwrap_buff.in_frame = FALSE;
+ kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
+ kingsun->rx_unwrap_buff.truesize = IRDA_SKB_MAX_MTU;
+ kingsun->rx_unwrap_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
+ if (!kingsun->rx_unwrap_buff.skb)
+ goto free_mem;
+
+ skb_reserve(kingsun->rx_unwrap_buff.skb, 1);
+ kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data;
+
+ kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!kingsun->rx_urb)
+ goto free_mem;
+
+ kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!kingsun->tx_urb)
+ goto free_mem;
+
+ kingsun->speed_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!kingsun->speed_urb)
+ goto free_mem;
+
+ /* Initialize speed for dongle */
+ kingsun->new_speed = 9600;
+ err = ks959_change_speed(kingsun, 9600);
+ if (err < 0)
+ goto free_mem;
+
+ /*
+ * Now that everything should be initialized properly,
+ * Open new IrLAP layer instance to take care of us...
+ */
+ sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
+ kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
+ if (!kingsun->irlap) {
+ err = -ENOMEM;
+ dev_err(&kingsun->usbdev->dev, "irlap_open failed\n");
+ goto free_mem;
+ }
+
+ /* Start reception. Setup request already pre-filled in ks959_probe */
+ usb_fill_control_urb(kingsun->rx_urb, kingsun->usbdev,
+ usb_rcvctrlpipe(kingsun->usbdev, 0),
+ (unsigned char *)kingsun->rx_setuprequest,
+ kingsun->rx_buf, KINGSUN_RCV_FIFO_SIZE,
+ ks959_rcv_irq, kingsun);
+ kingsun->rx_urb->status = 0;
+ err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
+ if (err) {
+ dev_err(&kingsun->usbdev->dev,
+ "first urb-submit failed: %d\n", err);
+ goto close_irlap;
+ }
+
+ netif_start_queue(netdev);
+
+ /* Situation at this point:
+ - all work buffers allocated
+ - urbs allocated and ready to fill
+ - max rx packet known (in max_rx)
+ - unwrap state machine initialized, in state outside of any frame
+ - receive request in progress
+ - IrLAP layer started, about to hand over packets to send
+ */
+
+ return 0;
+
+ close_irlap:
+ irlap_close(kingsun->irlap);
+ free_mem:
+ usb_free_urb(kingsun->speed_urb);
+ kingsun->speed_urb = NULL;
+ usb_free_urb(kingsun->tx_urb);
+ kingsun->tx_urb = NULL;
+ usb_free_urb(kingsun->rx_urb);
+ kingsun->rx_urb = NULL;
+ if (kingsun->rx_unwrap_buff.skb) {
+ kfree_skb(kingsun->rx_unwrap_buff.skb);
+ kingsun->rx_unwrap_buff.skb = NULL;
+ kingsun->rx_unwrap_buff.head = NULL;
+ }
+ return err;
+}
+
+/*
+ * Function ks959_net_close (netdev)
+ *
+ * Network device is taken down. Usually this is done by
+ * "ifconfig irda0 down"
+ */
+static int ks959_net_close(struct net_device *netdev)
+{
+ struct ks959_cb *kingsun = netdev_priv(netdev);
+
+ /* Stop transmit processing */
+ netif_stop_queue(netdev);
+
+ /* Mop up receive && transmit urb's */
+ usb_kill_urb(kingsun->tx_urb);
+ usb_free_urb(kingsun->tx_urb);
+ kingsun->tx_urb = NULL;
+
+ usb_kill_urb(kingsun->speed_urb);
+ usb_free_urb(kingsun->speed_urb);
+ kingsun->speed_urb = NULL;
+
+ usb_kill_urb(kingsun->rx_urb);
+ usb_free_urb(kingsun->rx_urb);
+ kingsun->rx_urb = NULL;
+
+ kfree_skb(kingsun->rx_unwrap_buff.skb);
+ kingsun->rx_unwrap_buff.skb = NULL;
+ kingsun->rx_unwrap_buff.head = NULL;
+ kingsun->rx_unwrap_buff.in_frame = FALSE;
+ kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
+ kingsun->receiving = 0;
+
+ /* Stop and remove instance of IrLAP */
+ if (kingsun->irlap)
+ irlap_close(kingsun->irlap);
+
+ kingsun->irlap = NULL;
+
+ return 0;
+}
+
+/*
+ * IOCTLs : Extra out-of-band network commands...
+ */
+static int ks959_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *)rq;
+ struct ks959_cb *kingsun = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the device is still there */
+ if (netif_device_present(kingsun->netdev))
+ return ks959_change_speed(kingsun, irq->ifr_baudrate);
+ break;
+
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the IrDA stack is still there */
+ if (netif_running(kingsun->netdev))
+ irda_device_set_media_busy(kingsun->netdev, TRUE);
+ break;
+
+ case SIOCGRECEIVING:
+ /* Only approximately true */
+ irq->ifr_receiving = kingsun->receiving;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static const struct net_device_ops ks959_ops = {
+ .ndo_start_xmit = ks959_hard_xmit,
+ .ndo_open = ks959_net_open,
+ .ndo_stop = ks959_net_close,
+ .ndo_do_ioctl = ks959_net_ioctl,
+};
+/*
+ * This routine is called by the USB subsystem for each new device
+ * in the system. We need to check if the device is ours, and in
+ * this case start handling it.
+ */
+static int ks959_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct ks959_cb *kingsun = NULL;
+ struct net_device *net = NULL;
+ int ret = -ENOMEM;
+
+ /* Allocate network device container. */
+ net = alloc_irdadev(sizeof(*kingsun));
+ if (!net)
+ goto err_out1;
+
+ SET_NETDEV_DEV(net, &intf->dev);
+ kingsun = netdev_priv(net);
+ kingsun->netdev = net;
+ kingsun->usbdev = dev;
+ kingsun->irlap = NULL;
+ kingsun->tx_setuprequest = NULL;
+ kingsun->tx_urb = NULL;
+ kingsun->tx_buf_clear = NULL;
+ kingsun->tx_buf_xored = NULL;
+ kingsun->tx_buf_clear_used = 0;
+ kingsun->tx_buf_clear_sent = 0;
+
+ kingsun->rx_setuprequest = NULL;
+ kingsun->rx_urb = NULL;
+ kingsun->rx_buf = NULL;
+ kingsun->rx_variable_xormask = 0;
+ kingsun->rx_unwrap_buff.in_frame = FALSE;
+ kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
+ kingsun->rx_unwrap_buff.skb = NULL;
+ kingsun->receiving = 0;
+ spin_lock_init(&kingsun->lock);
+
+ kingsun->speed_setuprequest = NULL;
+ kingsun->speed_urb = NULL;
+ kingsun->speedparams.baudrate = 0;
+
+ /* Allocate input buffer */
+ kingsun->rx_buf = kmalloc(KINGSUN_RCV_FIFO_SIZE, GFP_KERNEL);
+ if (!kingsun->rx_buf)
+ goto free_mem;
+
+ /* Allocate input setup packet */
+ kingsun->rx_setuprequest =
+ kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ if (!kingsun->rx_setuprequest)
+ goto free_mem;
+ kingsun->rx_setuprequest->bRequestType =
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ kingsun->rx_setuprequest->bRequest = KINGSUN_REQ_RECV;
+ kingsun->rx_setuprequest->wValue = cpu_to_le16(0x0200);
+ kingsun->rx_setuprequest->wIndex = 0;
+ kingsun->rx_setuprequest->wLength = cpu_to_le16(KINGSUN_RCV_FIFO_SIZE);
+
+ /* Allocate output buffer */
+ kingsun->tx_buf_clear = kmalloc(KINGSUN_SND_FIFO_SIZE, GFP_KERNEL);
+ if (!kingsun->tx_buf_clear)
+ goto free_mem;
+ kingsun->tx_buf_xored = kmalloc(KINGSUN_SND_PACKET_SIZE, GFP_KERNEL);
+ if (!kingsun->tx_buf_xored)
+ goto free_mem;
+
+ /* Allocate and initialize output setup packet */
+ kingsun->tx_setuprequest =
+ kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ if (!kingsun->tx_setuprequest)
+ goto free_mem;
+ kingsun->tx_setuprequest->bRequestType =
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ kingsun->tx_setuprequest->bRequest = KINGSUN_REQ_SEND;
+ kingsun->tx_setuprequest->wValue = 0;
+ kingsun->tx_setuprequest->wIndex = 0;
+ kingsun->tx_setuprequest->wLength = 0;
+
+ /* Allocate and initialize speed setup packet */
+ kingsun->speed_setuprequest =
+ kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ if (!kingsun->speed_setuprequest)
+ goto free_mem;
+ kingsun->speed_setuprequest->bRequestType =
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ kingsun->speed_setuprequest->bRequest = KINGSUN_REQ_SEND;
+ kingsun->speed_setuprequest->wValue = cpu_to_le16(0x0200);
+ kingsun->speed_setuprequest->wIndex = cpu_to_le16(0x0001);
+ kingsun->speed_setuprequest->wLength =
+ cpu_to_le16(sizeof(struct ks959_speedparams));
+
+ printk(KERN_INFO "KingSun KS-959 IRDA/USB found at address %d, "
+ "Vendor: %x, Product: %x\n",
+ dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&kingsun->qos);
+
+ /* Baud rates known to be supported. Please uncomment if devices (other
+	   than a Sony Ericsson K300 phone) can be shown to support higher speed
+ with this dongle.
+ */
+ kingsun->qos.baud_rate.bits =
+ IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600;
+ kingsun->qos.min_turn_time.bits &= KINGSUN_MTT;
+ irda_qos_bits_to_value(&kingsun->qos);
+
+ /* Override the network functions we need to use */
+ net->netdev_ops = &ks959_ops;
+
+ ret = register_netdev(net);
+ if (ret != 0)
+ goto free_mem;
+
+ dev_info(&net->dev, "IrDA: Registered KingSun KS-959 device %s\n",
+ net->name);
+
+ usb_set_intfdata(intf, kingsun);
+
+ /* Situation at this point:
+ - all work buffers allocated
+ - setup requests pre-filled
+ - urbs not allocated, set to NULL
+	   - max rx packet known (is KINGSUN_RCV_FIFO_SIZE)
+ - unwrap state machine (partially) initialized, but skb == NULL
+ */
+
+ return 0;
+
+ free_mem:
+ kfree(kingsun->speed_setuprequest);
+ kfree(kingsun->tx_setuprequest);
+ kfree(kingsun->tx_buf_xored);
+ kfree(kingsun->tx_buf_clear);
+ kfree(kingsun->rx_setuprequest);
+ kfree(kingsun->rx_buf);
+ free_netdev(net);
+ err_out1:
+ return ret;
+}
+
+/*
+ * The current device is removed, the USB layer tells us to shut it down...
+ */
+static void ks959_disconnect(struct usb_interface *intf)
+{
+ struct ks959_cb *kingsun = usb_get_intfdata(intf);
+
+ if (!kingsun)
+ return;
+
+ unregister_netdev(kingsun->netdev);
+
+ /* Mop up receive && transmit urb's */
+ if (kingsun->speed_urb != NULL) {
+ usb_kill_urb(kingsun->speed_urb);
+ usb_free_urb(kingsun->speed_urb);
+ kingsun->speed_urb = NULL;
+ }
+ if (kingsun->tx_urb != NULL) {
+ usb_kill_urb(kingsun->tx_urb);
+ usb_free_urb(kingsun->tx_urb);
+ kingsun->tx_urb = NULL;
+ }
+ if (kingsun->rx_urb != NULL) {
+ usb_kill_urb(kingsun->rx_urb);
+ usb_free_urb(kingsun->rx_urb);
+ kingsun->rx_urb = NULL;
+ }
+
+ kfree(kingsun->speed_setuprequest);
+ kfree(kingsun->tx_setuprequest);
+ kfree(kingsun->tx_buf_xored);
+ kfree(kingsun->tx_buf_clear);
+ kfree(kingsun->rx_setuprequest);
+ kfree(kingsun->rx_buf);
+ free_netdev(kingsun->netdev);
+
+ usb_set_intfdata(intf, NULL);
+}
+
+#ifdef CONFIG_PM
+/* USB suspend, so power off the transmitter/receiver */
+static int ks959_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct ks959_cb *kingsun = usb_get_intfdata(intf);
+
+ netif_device_detach(kingsun->netdev);
+ if (kingsun->speed_urb != NULL)
+ usb_kill_urb(kingsun->speed_urb);
+ if (kingsun->tx_urb != NULL)
+ usb_kill_urb(kingsun->tx_urb);
+ if (kingsun->rx_urb != NULL)
+ usb_kill_urb(kingsun->rx_urb);
+ return 0;
+}
+
+/* Coming out of suspend, so reset hardware */
+static int ks959_resume(struct usb_interface *intf)
+{
+ struct ks959_cb *kingsun = usb_get_intfdata(intf);
+
+ if (kingsun->rx_urb != NULL) {
+ /* Setup request already filled in ks959_probe */
+ usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
+ }
+ netif_device_attach(kingsun->netdev);
+
+ return 0;
+}
+#endif
+
+/*
+ * USB device callbacks
+ */
+static struct usb_driver irda_driver = {
+ .name = "ks959-sir",
+ .probe = ks959_probe,
+ .disconnect = ks959_disconnect,
+ .id_table = dongles,
+#ifdef CONFIG_PM
+ .suspend = ks959_suspend,
+ .resume = ks959_resume,
+#endif
+};
+
+module_usb_driver(irda_driver);
+
+MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
+MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun KS-959");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/irda/drivers/ksdazzle-sir.c b/drivers/staging/irda/drivers/ksdazzle-sir.c
new file mode 100644
index 000000000000..d2a0755df596
--- /dev/null
+++ b/drivers/staging/irda/drivers/ksdazzle-sir.c
@@ -0,0 +1,813 @@
+/*****************************************************************************
+*
+* Filename: ksdazzle-sir.c
+* Version: 0.1.2
+* Description: IrDA KingSun Dazzle USB Dongle
+* Status: Experimental
+* Author: Alex Villacís Lasso <a_villacis@palosanto.com>
+*
+* Based on stir4200, mcs7780, kingsun-sir drivers.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+*****************************************************************************/
+
+/*
+ * Following is my most current (2007-07-26) understanding of how the Kingsun
+ * 07D0:4100 dongle (sometimes known as the MA-660) is supposed to work. This
+ * information was deduced by examining the USB traffic captured with USBSnoopy
+ * from the WinXP driver. Feel free to update here as more of the dongle is
+ * known.
+ *
+ * General: This dongle exposes one interface with two interrupt endpoints, one
+ * IN and one OUT. In this regard, it is similar to what the Kingsun/Donshine
+ * dongle (07c0:4200) exposes. Traffic is raw and needs to be wrapped and
+ * unwrapped manually as in stir4200, kingsun-sir, and ks959-sir.
+ *
+ * Transmission: To transmit an IrDA frame, it is necessary to wrap it, then
+ * split it into multiple segments of up to 7 bytes each, and transmit each in
+ * sequence. It seems that sending a single big block (like kingsun-sir does)
+ * won't work with this dongle. Each segment needs to be prefixed with a value
+ * equal to (unsigned char)0xF8 + <number of bytes in segment>, inside a payload
+ * of exactly 8 bytes. For example, a segment of 1 byte gets prefixed by 0xF9,
+ * and one of 7 bytes gets prefixed by 0xFF. The bytes at the end of the
+ * payload, not considered by the prefix, are ignored (set to 0 by this
+ * implementation).
+ *
+ * Reception: To receive data, the driver must poll the dongle regularly (like
+ * kingsun-sir.c) with interrupt URBs. If data is available, it will be returned
+ * in payloads from 0 to 8 bytes long. When concatenated, these payloads form
+ * a raw IrDA stream that needs to be unwrapped as in stir4200 and kingsun-sir
+ *
+ * Speed change: To change the speed of the dongle, the driver prepares a
+ * control URB with the following as a setup packet:
+ * bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
+ * bRequest 0x09
+ * wValue 0x0200
+ * wIndex 0x0001
+ * wLength 0x0008 (length of the payload)
+ * The payload is an 8-byte record, apparently identical to the one used in
+ * drivers/usb/serial/cypress_m8.c to change speed:
+ * __u32 baudSpeed;
+ * unsigned int dataBits : 2; // 0 - 5 bits 3 - 8 bits
+ * unsigned int : 1;
+ * unsigned int stopBits : 1;
+ * unsigned int parityEnable : 1;
+ * unsigned int parityType : 1;
+ * unsigned int : 1;
+ * unsigned int reset : 1;
+ * unsigned char reserved[3]; // set to 0
+ *
+ * For now, only SIR speeds have been observed with this dongle. Therefore,
+ * nothing is known about what changes (if any) must be made to frame wrapping /
+ * unwrapping for speeds above SIR. This driver assumes no change is necessary
+ * and announces support for speeds all the way up to 115200 bps.
+ */
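+
+/*
+ * Worked example of the framing described above (illustrative only, not
+ * taken from the USB captures): a wrapped frame of 10 bytes d0..d9 is sent
+ * as two 8-byte interrupt payloads:
+ *   0xFF d0 d1 d2 d3 d4 d5 d6            (0xF8 + 7 data bytes)
+ *   0xFB d7 d8 d9 0x00 0x00 0x00 0x00    (0xF8 + 3 data bytes, padded with 0)
+ */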
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/device.h>
+#include <linux/crc32.h>
+
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+#include <linux/uaccess.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/crc.h>
+
+#define KSDAZZLE_VENDOR_ID 0x07d0
+#define KSDAZZLE_PRODUCT_ID 0x4100
+
+/* These are the currently known USB ids */
+static const struct usb_device_id dongles[] = {
+ /* KingSun Co,Ltd IrDA/USB Bridge */
+ {USB_DEVICE(KSDAZZLE_VENDOR_ID, KSDAZZLE_PRODUCT_ID)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(usb, dongles);
+
+#define KINGSUN_MTT 0x07
+#define KINGSUN_REQ_RECV 0x01
+#define KINGSUN_REQ_SEND 0x09
+
+#define KINGSUN_SND_FIFO_SIZE 2048 /* Max packet we can send */
+#define KINGSUN_RCV_MAX 2048 /* Max transfer we can receive */
+
+struct ksdazzle_speedparams {
+ __le32 baudrate; /* baud rate, little endian */
+ __u8 flags;
+ __u8 reserved[3];
+} __packed;
+
+#define KS_DATA_5_BITS 0x00
+#define KS_DATA_6_BITS 0x01
+#define KS_DATA_7_BITS 0x02
+#define KS_DATA_8_BITS 0x03
+
+#define KS_STOP_BITS_1 0x00
+#define KS_STOP_BITS_2 0x08
+
+#define KS_PAR_DISABLE 0x00
+#define KS_PAR_EVEN 0x10
+#define KS_PAR_ODD 0x30
+#define KS_RESET 0x80
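+
+/*
+ * Example of the speed-change payload (a sketch based on
+ * ksdazzle_change_speed() below): selecting 9600 bps sends
+ * { .baudrate = cpu_to_le32(9600), .flags = KS_DATA_8_BITS, .reserved = {0} }
+ * through the control URB described at the top of this file.
+ */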
+
+#define KINGSUN_EP_IN 0
+#define KINGSUN_EP_OUT 1
+
+struct ksdazzle_cb {
+ struct usb_device *usbdev; /* init: probe_irda */
+ struct net_device *netdev; /* network layer */
+ struct irlap_cb *irlap; /* The link layer we are bound to */
+
+ struct qos_info qos;
+
+ struct urb *tx_urb;
+ __u8 *tx_buf_clear;
+ unsigned int tx_buf_clear_used;
+ unsigned int tx_buf_clear_sent;
+ __u8 tx_payload[8];
+
+ struct urb *rx_urb;
+ __u8 *rx_buf;
+ iobuff_t rx_unwrap_buff;
+
+ struct usb_ctrlrequest *speed_setuprequest;
+ struct urb *speed_urb;
+ struct ksdazzle_speedparams speedparams;
+ unsigned int new_speed;
+
+ __u8 ep_in;
+ __u8 ep_out;
+
+ spinlock_t lock;
+ int receiving;
+};
+
+/* Callback transmission routine */
+static void ksdazzle_speed_irq(struct urb *urb)
+{
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0)
+ dev_err(&urb->dev->dev,
+ "ksdazzle_speed_irq: urb asynchronously failed - %d\n",
+ urb->status);
+}
+
+/* Send a control request to change speed of the dongle */
+static int ksdazzle_change_speed(struct ksdazzle_cb *kingsun, unsigned speed)
+{
+ static unsigned int supported_speeds[] = { 2400, 9600, 19200, 38400,
+ 57600, 115200, 576000, 1152000, 4000000, 0
+ };
+ int err;
+ unsigned int i;
+
+ if (kingsun->speed_setuprequest == NULL || kingsun->speed_urb == NULL)
+ return -ENOMEM;
+
+ /* Check that requested speed is among the supported ones */
+ for (i = 0; supported_speeds[i] && supported_speeds[i] != speed; i++) ;
+ if (supported_speeds[i] == 0)
+ return -EOPNOTSUPP;
+
+ memset(&(kingsun->speedparams), 0, sizeof(struct ksdazzle_speedparams));
+ kingsun->speedparams.baudrate = cpu_to_le32(speed);
+ kingsun->speedparams.flags = KS_DATA_8_BITS;
+
+ /* speed_setuprequest pre-filled in ksdazzle_probe */
+ usb_fill_control_urb(kingsun->speed_urb, kingsun->usbdev,
+ usb_sndctrlpipe(kingsun->usbdev, 0),
+ (unsigned char *)kingsun->speed_setuprequest,
+ &(kingsun->speedparams),
+ sizeof(struct ksdazzle_speedparams),
+ ksdazzle_speed_irq, kingsun);
+ kingsun->speed_urb->status = 0;
+ err = usb_submit_urb(kingsun->speed_urb, GFP_ATOMIC);
+
+ return err;
+}
+
+/* Submit one fragment of an IrDA frame to the dongle */
+static void ksdazzle_send_irq(struct urb *urb);
+static int ksdazzle_submit_tx_fragment(struct ksdazzle_cb *kingsun)
+{
+ unsigned int wraplen;
+ int ret;
+
+ /* We can send at most 7 bytes of payload at a time */
+ wraplen = 7;
+ if (wraplen > kingsun->tx_buf_clear_used)
+ wraplen = kingsun->tx_buf_clear_used;
+
+ /* Prepare payload prefix with used length */
+ memset(kingsun->tx_payload, 0, 8);
+ kingsun->tx_payload[0] = (unsigned char)0xf8 + wraplen;
+ memcpy(kingsun->tx_payload + 1, kingsun->tx_buf_clear, wraplen);
+
+ usb_fill_int_urb(kingsun->tx_urb, kingsun->usbdev,
+ usb_sndintpipe(kingsun->usbdev, kingsun->ep_out),
+ kingsun->tx_payload, 8, ksdazzle_send_irq, kingsun, 1);
+ kingsun->tx_urb->status = 0;
+ ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC);
+
+ /* Remember how much data was sent, in order to update at callback */
+ kingsun->tx_buf_clear_sent = (ret == 0) ? wraplen : 0;
+ return ret;
+}
+
+/* Callback transmission routine */
+static void ksdazzle_send_irq(struct urb *urb)
+{
+ struct ksdazzle_cb *kingsun = urb->context;
+ struct net_device *netdev = kingsun->netdev;
+ int ret = 0;
+
+ /* in process of stopping, just drop data */
+ if (!netif_running(kingsun->netdev)) {
+ dev_err(&kingsun->usbdev->dev,
+ "ksdazzle_send_irq: Network not running!\n");
+ return;
+ }
+
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "ksdazzle_send_irq: urb asynchronously failed - %d\n",
+ urb->status);
+ return;
+ }
+
+ if (kingsun->tx_buf_clear_used > 0) {
+ /* Update data remaining to be sent */
+ if (kingsun->tx_buf_clear_sent < kingsun->tx_buf_clear_used) {
+ memmove(kingsun->tx_buf_clear,
+ kingsun->tx_buf_clear +
+ kingsun->tx_buf_clear_sent,
+ kingsun->tx_buf_clear_used -
+ kingsun->tx_buf_clear_sent);
+ }
+ kingsun->tx_buf_clear_used -= kingsun->tx_buf_clear_sent;
+ kingsun->tx_buf_clear_sent = 0;
+
+ if (kingsun->tx_buf_clear_used > 0) {
+ /* There is more data to be sent */
+ if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "ksdazzle_send_irq: failed tx_urb submit: %d\n",
+ ret);
+ switch (ret) {
+ case -ENODEV:
+ case -EPIPE:
+ break;
+ default:
+ netdev->stats.tx_errors++;
+ netif_start_queue(netdev);
+ }
+ }
+ } else {
+ /* All data sent, send next speed && wake network queue */
+ if (kingsun->new_speed != -1 &&
+ cpu_to_le32(kingsun->new_speed) !=
+ kingsun->speedparams.baudrate)
+ ksdazzle_change_speed(kingsun,
+ kingsun->new_speed);
+
+ netif_wake_queue(netdev);
+ }
+ }
+}
+
+/*
+ * Called from net/core when new frame is available.
+ */
+static netdev_tx_t ksdazzle_hard_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct ksdazzle_cb *kingsun;
+ unsigned int wraplen;
+ int ret = 0;
+
+ netif_stop_queue(netdev);
+
+ /* the IRDA wrapping routines don't deal with non linear skb */
+ SKB_LINEAR_ASSERT(skb);
+
+ kingsun = netdev_priv(netdev);
+
+ spin_lock(&kingsun->lock);
+ kingsun->new_speed = irda_get_next_speed(skb);
+
+ /* Append data to the end of whatever data remains to be transmitted */
+ wraplen =
+ async_wrap_skb(skb, kingsun->tx_buf_clear, KINGSUN_SND_FIFO_SIZE);
+ kingsun->tx_buf_clear_used = wraplen;
+
+ if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "ksdazzle_hard_xmit: failed tx_urb submit: %d\n", ret);
+ switch (ret) {
+ case -ENODEV:
+ case -EPIPE:
+ break;
+ default:
+ netdev->stats.tx_errors++;
+ netif_start_queue(netdev);
+ }
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+
+ }
+
+ dev_kfree_skb(skb);
+ spin_unlock(&kingsun->lock);
+
+ return NETDEV_TX_OK;
+}
+
+/* Receive callback function */
+static void ksdazzle_rcv_irq(struct urb *urb)
+{
+ struct ksdazzle_cb *kingsun = urb->context;
+ struct net_device *netdev = kingsun->netdev;
+
+ /* in process of stopping, just drop data */
+ if (!netif_running(netdev)) {
+ kingsun->receiving = 0;
+ return;
+ }
+
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0) {
+ dev_err(&kingsun->usbdev->dev,
+ "ksdazzle_rcv_irq: urb asynchronously failed - %d\n",
+ urb->status);
+ kingsun->receiving = 0;
+ return;
+ }
+
+ if (urb->actual_length > 0) {
+ __u8 *bytes = urb->transfer_buffer;
+ unsigned int i;
+
+ for (i = 0; i < urb->actual_length; i++) {
+ async_unwrap_char(netdev, &netdev->stats,
+ &kingsun->rx_unwrap_buff, bytes[i]);
+ }
+ kingsun->receiving =
+ (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0;
+ }
+
+ /* This urb has already been filled in ksdazzle_net_open. It is assumed that
+ urb keeps the pointer to the payload buffer.
+ */
+ urb->status = 0;
+ usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+/*
+ * Function ksdazzle_net_open (dev)
+ *
+ * Network device is taken up. Usually this is done by "ifconfig irda0 up"
+ */
+static int ksdazzle_net_open(struct net_device *netdev)
+{
+ struct ksdazzle_cb *kingsun = netdev_priv(netdev);
+ int err = -ENOMEM;
+ char hwname[16];
+
+ /* At this point, urbs are NULL, and skb is NULL (see ksdazzle_probe) */
+ kingsun->receiving = 0;
+
+ /* Initialize for SIR to copy data directly into skb. */
+ kingsun->rx_unwrap_buff.in_frame = FALSE;
+ kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
+ kingsun->rx_unwrap_buff.truesize = IRDA_SKB_MAX_MTU;
+ kingsun->rx_unwrap_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
+ if (!kingsun->rx_unwrap_buff.skb)
+ goto free_mem;
+
+ skb_reserve(kingsun->rx_unwrap_buff.skb, 1);
+ kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data;
+
+ kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!kingsun->rx_urb)
+ goto free_mem;
+
+ kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!kingsun->tx_urb)
+ goto free_mem;
+
+ kingsun->speed_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!kingsun->speed_urb)
+ goto free_mem;
+
+ /* Initialize speed for dongle */
+ kingsun->new_speed = 9600;
+ err = ksdazzle_change_speed(kingsun, 9600);
+ if (err < 0)
+ goto free_mem;
+
+ /*
+ * Now that everything should be initialized properly,
+ * Open new IrLAP layer instance to take care of us...
+ */
+ sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
+ kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
+ if (!kingsun->irlap) {
+ err = -ENOMEM;
+ dev_err(&kingsun->usbdev->dev, "irlap_open failed\n");
+ goto free_mem;
+ }
+
+ /* Start reception. */
+ usb_fill_int_urb(kingsun->rx_urb, kingsun->usbdev,
+ usb_rcvintpipe(kingsun->usbdev, kingsun->ep_in),
+ kingsun->rx_buf, KINGSUN_RCV_MAX, ksdazzle_rcv_irq,
+ kingsun, 1);
+ kingsun->rx_urb->status = 0;
+ err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
+ if (err) {
+ dev_err(&kingsun->usbdev->dev, "first urb-submit failed: %d\n", err);
+ goto close_irlap;
+ }
+
+ netif_start_queue(netdev);
+
+ /* Situation at this point:
+ - all work buffers allocated
+ - urbs allocated and ready to fill
+ - max rx packet known (KINGSUN_RCV_MAX)
+ - unwrap state machine initialized, in state outside of any frame
+ - receive request in progress
+ - IrLAP layer started, about to hand over packets to send
+ */
+
+ return 0;
+
+ close_irlap:
+ irlap_close(kingsun->irlap);
+ free_mem:
+ usb_free_urb(kingsun->speed_urb);
+ kingsun->speed_urb = NULL;
+ usb_free_urb(kingsun->tx_urb);
+ kingsun->tx_urb = NULL;
+ usb_free_urb(kingsun->rx_urb);
+ kingsun->rx_urb = NULL;
+ if (kingsun->rx_unwrap_buff.skb) {
+ kfree_skb(kingsun->rx_unwrap_buff.skb);
+ kingsun->rx_unwrap_buff.skb = NULL;
+ kingsun->rx_unwrap_buff.head = NULL;
+ }
+ return err;
+}
+
+/*
+ * Function ksdazzle_net_close (dev)
+ *
+ * Network device is taken down. Usually this is done by
+ * "ifconfig irda0 down"
+ */
+static int ksdazzle_net_close(struct net_device *netdev)
+{
+ struct ksdazzle_cb *kingsun = netdev_priv(netdev);
+
+ /* Stop transmit processing */
+ netif_stop_queue(netdev);
+
+ /* Mop up receive && transmit urb's */
+ usb_kill_urb(kingsun->tx_urb);
+ usb_free_urb(kingsun->tx_urb);
+ kingsun->tx_urb = NULL;
+
+ usb_kill_urb(kingsun->speed_urb);
+ usb_free_urb(kingsun->speed_urb);
+ kingsun->speed_urb = NULL;
+
+ usb_kill_urb(kingsun->rx_urb);
+ usb_free_urb(kingsun->rx_urb);
+ kingsun->rx_urb = NULL;
+
+ kfree_skb(kingsun->rx_unwrap_buff.skb);
+ kingsun->rx_unwrap_buff.skb = NULL;
+ kingsun->rx_unwrap_buff.head = NULL;
+ kingsun->rx_unwrap_buff.in_frame = FALSE;
+ kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
+ kingsun->receiving = 0;
+
+ /* Stop and remove instance of IrLAP */
+ irlap_close(kingsun->irlap);
+
+ kingsun->irlap = NULL;
+
+ return 0;
+}
+
+/*
+ * IOCTLs : Extra out-of-band network commands...
+ */
+static int ksdazzle_net_ioctl(struct net_device *netdev, struct ifreq *rq,
+ int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *)rq;
+ struct ksdazzle_cb *kingsun = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the device is still there */
+ if (netif_device_present(kingsun->netdev))
+ return ksdazzle_change_speed(kingsun,
+ irq->ifr_baudrate);
+ break;
+
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the IrDA stack is still there */
+ if (netif_running(kingsun->netdev))
+ irda_device_set_media_busy(kingsun->netdev, TRUE);
+ break;
+
+ case SIOCGRECEIVING:
+ /* Only approximately true */
+ irq->ifr_receiving = kingsun->receiving;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static const struct net_device_ops ksdazzle_ops = {
+ .ndo_start_xmit = ksdazzle_hard_xmit,
+ .ndo_open = ksdazzle_net_open,
+ .ndo_stop = ksdazzle_net_close,
+ .ndo_do_ioctl = ksdazzle_net_ioctl,
+};
+
+/*
+ * This routine is called by the USB subsystem for each new device
+ * in the system. We need to check if the device is ours, and in
+ * this case start handling it.
+ */
+static int ksdazzle_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_host_interface *interface;
+ struct usb_endpoint_descriptor *endpoint;
+
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct ksdazzle_cb *kingsun = NULL;
+ struct net_device *net = NULL;
+ int ret = -ENOMEM;
+ int pipe, maxp_in, maxp_out;
+ __u8 ep_in;
+ __u8 ep_out;
+
+ /* Check that there really are two interrupt endpoints. Check based on the
+ one in drivers/usb/input/usbmouse.c
+ */
+ interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints != 2) {
+ dev_err(&intf->dev, "ksdazzle: expected 2 endpoints, found %d\n",
+ interface->desc.bNumEndpoints);
+ return -ENODEV;
+ }
+ endpoint = &interface->endpoint[KINGSUN_EP_IN].desc;
+ if (!usb_endpoint_is_int_in(endpoint)) {
+ dev_err(&intf->dev,
+ "ksdazzle: endpoint 0 is not interrupt IN\n");
+ return -ENODEV;
+ }
+
+ ep_in = endpoint->bEndpointAddress;
+ pipe = usb_rcvintpipe(dev, ep_in);
+ maxp_in = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+ if (maxp_in > 255 || maxp_in <= 1) {
+ dev_err(&intf->dev,
+ "ksdazzle: endpoint 0 has max packet size %d not in range [2..255]\n",
+ maxp_in);
+ return -ENODEV;
+ }
+
+ endpoint = &interface->endpoint[KINGSUN_EP_OUT].desc;
+ if (!usb_endpoint_is_int_out(endpoint)) {
+ dev_err(&intf->dev,
+ "ksdazzle: endpoint 1 is not interrupt OUT\n");
+ return -ENODEV;
+ }
+
+ ep_out = endpoint->bEndpointAddress;
+ pipe = usb_sndintpipe(dev, ep_out);
+ maxp_out = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+
+ /* Allocate network device container. */
+ net = alloc_irdadev(sizeof(*kingsun));
+ if (!net)
+ goto err_out1;
+
+ SET_NETDEV_DEV(net, &intf->dev);
+ kingsun = netdev_priv(net);
+ kingsun->netdev = net;
+ kingsun->usbdev = dev;
+ kingsun->ep_in = ep_in;
+ kingsun->ep_out = ep_out;
+ kingsun->irlap = NULL;
+ kingsun->tx_urb = NULL;
+ kingsun->tx_buf_clear = NULL;
+ kingsun->tx_buf_clear_used = 0;
+ kingsun->tx_buf_clear_sent = 0;
+
+ kingsun->rx_urb = NULL;
+ kingsun->rx_buf = NULL;
+ kingsun->rx_unwrap_buff.in_frame = FALSE;
+ kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
+ kingsun->rx_unwrap_buff.skb = NULL;
+ kingsun->receiving = 0;
+ spin_lock_init(&kingsun->lock);
+
+ kingsun->speed_setuprequest = NULL;
+ kingsun->speed_urb = NULL;
+ kingsun->speedparams.baudrate = 0;
+
+ /* Allocate input buffer */
+ kingsun->rx_buf = kmalloc(KINGSUN_RCV_MAX, GFP_KERNEL);
+ if (!kingsun->rx_buf)
+ goto free_mem;
+
+ /* Allocate output buffer */
+ kingsun->tx_buf_clear = kmalloc(KINGSUN_SND_FIFO_SIZE, GFP_KERNEL);
+ if (!kingsun->tx_buf_clear)
+ goto free_mem;
+
+ /* Allocate and initialize speed setup packet */
+ kingsun->speed_setuprequest =
+ kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ if (!kingsun->speed_setuprequest)
+ goto free_mem;
+ kingsun->speed_setuprequest->bRequestType =
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ kingsun->speed_setuprequest->bRequest = KINGSUN_REQ_SEND;
+ kingsun->speed_setuprequest->wValue = cpu_to_le16(0x0200);
+ kingsun->speed_setuprequest->wIndex = cpu_to_le16(0x0001);
+ kingsun->speed_setuprequest->wLength =
+ cpu_to_le16(sizeof(struct ksdazzle_speedparams));
+
+ printk(KERN_INFO "KingSun/Dazzle IRDA/USB found at address %d, "
+ "Vendor: %x, Product: %x\n",
+ dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&kingsun->qos);
+
+ /* Baud rates known to be supported. Please uncomment if devices (other
+ than a Sony Ericsson K300 phone) can be shown to support higher speeds
+ with this dongle.
+ */
+ kingsun->qos.baud_rate.bits =
+ IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
+ kingsun->qos.min_turn_time.bits &= KINGSUN_MTT;
+ irda_qos_bits_to_value(&kingsun->qos);
+
+ /* Override the network functions we need to use */
+ net->netdev_ops = &ksdazzle_ops;
+
+ ret = register_netdev(net);
+ if (ret != 0)
+ goto free_mem;
+
+ dev_info(&net->dev, "IrDA: Registered KingSun/Dazzle device %s\n",
+ net->name);
+
+ usb_set_intfdata(intf, kingsun);
+
+ /* Situation at this point:
+ - all work buffers allocated
+ - setup requests pre-filled
+ - urbs not allocated, set to NULL
+ - max rx packet known (KINGSUN_RCV_MAX)
+ - unwrap state machine (partially) initialized, but skb == NULL
+ */
+
+ return 0;
+
+ free_mem:
+ kfree(kingsun->speed_setuprequest);
+ kfree(kingsun->tx_buf_clear);
+ kfree(kingsun->rx_buf);
+ free_netdev(net);
+ err_out1:
+ return ret;
+}
+
+/*
+ * The current device is removed, the USB layer tells us to shut it down...
+ */
+static void ksdazzle_disconnect(struct usb_interface *intf)
+{
+ struct ksdazzle_cb *kingsun = usb_get_intfdata(intf);
+
+ if (!kingsun)
+ return;
+
+ unregister_netdev(kingsun->netdev);
+
+ /* Mop up receive && transmit urb's */
+ usb_kill_urb(kingsun->speed_urb);
+ usb_free_urb(kingsun->speed_urb);
+ kingsun->speed_urb = NULL;
+
+ usb_kill_urb(kingsun->tx_urb);
+ usb_free_urb(kingsun->tx_urb);
+ kingsun->tx_urb = NULL;
+
+ usb_kill_urb(kingsun->rx_urb);
+ usb_free_urb(kingsun->rx_urb);
+ kingsun->rx_urb = NULL;
+
+ kfree(kingsun->speed_setuprequest);
+ kfree(kingsun->tx_buf_clear);
+ kfree(kingsun->rx_buf);
+ free_netdev(kingsun->netdev);
+
+ usb_set_intfdata(intf, NULL);
+}
+
+#ifdef CONFIG_PM
+/* USB suspend, so power off the transmitter/receiver */
+static int ksdazzle_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct ksdazzle_cb *kingsun = usb_get_intfdata(intf);
+
+ netif_device_detach(kingsun->netdev);
+ if (kingsun->speed_urb != NULL)
+ usb_kill_urb(kingsun->speed_urb);
+ if (kingsun->tx_urb != NULL)
+ usb_kill_urb(kingsun->tx_urb);
+ if (kingsun->rx_urb != NULL)
+ usb_kill_urb(kingsun->rx_urb);
+ return 0;
+}
+
+/* Coming out of suspend, so reset hardware */
+static int ksdazzle_resume(struct usb_interface *intf)
+{
+ struct ksdazzle_cb *kingsun = usb_get_intfdata(intf);
+
+ if (kingsun->rx_urb != NULL) {
+ /* Setup request already filled in ksdazzle_probe */
+ usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
+ }
+ netif_device_attach(kingsun->netdev);
+
+ return 0;
+}
+#endif
+
+/*
+ * USB device callbacks
+ */
+static struct usb_driver irda_driver = {
+ .name = "ksdazzle-sir",
+ .probe = ksdazzle_probe,
+ .disconnect = ksdazzle_disconnect,
+ .id_table = dongles,
+#ifdef CONFIG_PM
+ .suspend = ksdazzle_suspend,
+ .resume = ksdazzle_resume,
+#endif
+};
+
+module_usb_driver(irda_driver);
+
+MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
+MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun Dazzle");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/irda/drivers/litelink-sir.c b/drivers/staging/irda/drivers/litelink-sir.c
new file mode 100644
index 000000000000..8eefcb44bac3
--- /dev/null
+++ b/drivers/staging/irda/drivers/litelink-sir.c
@@ -0,0 +1,199 @@
+/*********************************************************************
+ *
+ * Filename: litelink.c
+ * Version: 1.1
+ * Description: Driver for the Parallax LiteLink dongle
+ * Status: Stable
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Fri May 7 12:50:33 1999
+ * Modified at: Fri Dec 17 09:14:23 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+/*
+ * Modified at: Thu Jan 15 2003
+ * Modified by: Eugene Crosser <crosser@average.org>
+ *
+ * Convert to "new" IRDA infrastructure for kernel 2.6
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */
+#define MAX_DELAY 10000 /* 1 ms */
+
+static int litelink_open(struct sir_dev *dev);
+static int litelink_close(struct sir_dev *dev);
+static int litelink_change_speed(struct sir_dev *dev, unsigned speed);
+static int litelink_reset(struct sir_dev *dev);
+
+/* These are the baudrates supported - 9600 must be the last one! */
+static unsigned baud_rates[] = { 115200, 57600, 38400, 19200, 9600 };
+
+static struct dongle_driver litelink = {
+ .owner = THIS_MODULE,
+ .driver_name = "Parallax LiteLink",
+ .type = IRDA_LITELINK_DONGLE,
+ .open = litelink_open,
+ .close = litelink_close,
+ .reset = litelink_reset,
+ .set_speed = litelink_change_speed,
+};
+
+static int __init litelink_sir_init(void)
+{
+ return irda_register_dongle(&litelink);
+}
+
+static void __exit litelink_sir_cleanup(void)
+{
+ irda_unregister_dongle(&litelink);
+}
+
+static int litelink_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ /* Power up dongle */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Set the speeds we can accept */
+ qos->baud_rate.bits &= IR_115200|IR_57600|IR_38400|IR_19200|IR_9600;
+ qos->min_turn_time.bits = 0x7f; /* Needs 0.01 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int litelink_close(struct sir_dev *dev)
+{
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function litelink_change_speed (task)
+ *
+ * Change speed of the Litelink dongle. To cycle through the available
+ * baud rates, pulse RTS low for a few ms.
+ */
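+/*
+ * For example (walking through the loop below), a request for 38400 bps
+ * from the 115200 bps power-on default takes two RTS pulses:
+ * 115200 -> 57600 -> 38400.
+ */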
+static int litelink_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ int i;
+
+ /* dongle already reset by irda-thread - current speed (dongle and
+ * port) is the default speed (115200 for litelink!)
+ */
+
+ /* Cycle through available baudrates until we reach the correct one */
+ for (i = 0; baud_rates[i] != speed; i++) {
+
+ /* end-of-list reached due to invalid speed request */
+ if (baud_rates[i] == 9600)
+ break;
+
+ /* Set DTR, clear RTS */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* Set DTR, Set RTS */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+ }
+
+ dev->speed = baud_rates[i];
+
+ /* an invalid baudrate should not happen - but if it does, we return -EINVAL
+ * and leave the dongle configured for 9600 so the stack has a chance
+ * to recover
+ */
+
+ return (dev->speed == speed) ? 0 : -EINVAL;
+}
+
+/*
+ * Function litelink_reset (task)
+ *
+ * Reset the Litelink type dongle.
+ *
+ */
+static int litelink_reset(struct sir_dev *dev)
+{
+ /* probably the power-up can be dropped here, but with only
+ * 15 usec delay it's not worth the risk unless somebody with
+ * the hardware confirms it doesn't break anything...
+ */
+
+ /* Power on dongle */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* Clear RTS to reset dongle */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Sleep a minimum of 15 us */
+ udelay(MIN_DELAY);
+
+ /* This dongle's speed defaults to 115200 bps */
+ dev->speed = 115200;
+
+ return 0;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Parallax Litelink dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-5"); /* IRDA_LITELINK_DONGLE */
+
+/*
+ * Function init_module (void)
+ *
+ * Initialize Litelink module
+ *
+ */
+module_init(litelink_sir_init);
+
+/*
+ * Function cleanup_module (void)
+ *
+ * Cleanup Litelink module
+ *
+ */
+module_exit(litelink_sir_cleanup);
diff --git a/drivers/staging/irda/drivers/ma600-sir.c b/drivers/staging/irda/drivers/ma600-sir.c
new file mode 100644
index 000000000000..a764817b47f1
--- /dev/null
+++ b/drivers/staging/irda/drivers/ma600-sir.c
@@ -0,0 +1,253 @@
+/*********************************************************************
+ *
+ * Filename: ma600.c
+ * Version: 0.1
+ * Description: Implementation of the MA600 dongle
+ * Status: Experimental.
+ * Author: Leung <95Etwl@alumni.ee.ust.hk> http://www.engsvr.ust/~eetwl95
+ * Created at: Sat Jun 10 20:02:35 2000
+ * Modified at: Sat Aug 16 09:34:13 2003
+ * Modified by: Martin Diehl <mad@mdiehl.de> (modified for new sir_dev)
+ *
+ * Note: many thanks to Mr. Maru Wang <maru@mobileaction.com.tw> for providing
+ * information on the MA600 dongle
+ *
+ * Copyright (c) 2000 Leung, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int ma600_open(struct sir_dev *);
+static int ma600_close(struct sir_dev *);
+static int ma600_change_speed(struct sir_dev *, unsigned);
+static int ma600_reset(struct sir_dev *);
+
+/* control byte for MA600 */
+#define MA600_9600 0x00
+#define MA600_19200 0x01
+#define MA600_38400 0x02
+#define MA600_57600 0x03
+#define MA600_115200 0x04
+#define MA600_DEV_ID1 0x05
+#define MA600_DEV_ID2 0x06
+#define MA600_2400 0x08
+
+static struct dongle_driver ma600 = {
+ .owner = THIS_MODULE,
+ .driver_name = "MA600",
+ .type = IRDA_MA600_DONGLE,
+ .open = ma600_open,
+ .close = ma600_close,
+ .reset = ma600_reset,
+ .set_speed = ma600_change_speed,
+};
+
+
+static int __init ma600_sir_init(void)
+{
+ return irda_register_dongle(&ma600);
+}
+
+static void __exit ma600_sir_cleanup(void)
+{
+ irda_unregister_dongle(&ma600);
+}
+
+/*
+ Power on:
+ (0) Clear RTS and DTR for 1 second
+ (1) Set RTS and DTR for 1 second
+ (2) 9600 bps now
+ Note: assume RTS, DTR are clear before
+*/
+static int ma600_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Explicitly set the speeds we can accept */
+ qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400
+ |IR_57600|IR_115200;
+ /* Hm, 0x01 means 10ms - for >= 1ms we would need 0x07 */
+ qos->min_turn_time.bits = 0x01; /* Needs at least 1 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int ma600_close(struct sir_dev *dev)
+{
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+static __u8 get_control_byte(__u32 speed)
+{
+ __u8 byte;
+
+ switch (speed) {
+ default:
+ case 115200:
+ byte = MA600_115200;
+ break;
+ case 57600:
+ byte = MA600_57600;
+ break;
+ case 38400:
+ byte = MA600_38400;
+ break;
+ case 19200:
+ byte = MA600_19200;
+ break;
+ case 9600:
+ byte = MA600_9600;
+ break;
+ case 2400:
+ byte = MA600_2400;
+ break;
+ }
+
+ return byte;
+}
+
+
+/*
+ * Function ma600_change_speed (dev, speed)
+ *
+ * Set the speed for the MA600 type dongle.
+ *
+ * Algorithm
+ * 1. Reset (already done by irda thread state machine)
+ * 2. clear RTS, set DTR and wait for 1ms
+ * 3. send Control Byte to the MA600 through TXD to set new baud rate
+ * wait until the stop bit of Control Byte is sent (for 9600 baud rate,
+ * it takes about 10 msec)
+ * 4. set RTS, set DTR (return to NORMAL Operation)
+ * 5. wait at least 10 ms, new setting (baud rate, etc) takes effect here
+ * after
+ */
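+
+/*
+ * For example (following the algorithm above), selecting 19200 bps from the
+ * 9600 bps default: clear RTS with DTR still set, wait 1 ms, write
+ * MA600_19200 (0x01) on TXD at 9600 baud, optionally read the byte back to
+ * verify it, then set RTS again and wait at least 10 ms.
+ */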
+
+/* total delays are only about 20ms - let's just sleep for now to
+ * avoid the state machine complexity before we get things working
+ */
+
+static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ u8 byte;
+
+ pr_debug("%s(), speed=%d (was %d)\n", __func__,
+ speed, dev->speed);
+
+ /* dongle already reset, dongle and port at default speed (9600) */
+
+ /* Set RTS low for 1 ms */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+ mdelay(1);
+
+ /* Write control byte */
+ byte = get_control_byte(speed);
+ sirdev_raw_write(dev, &byte, sizeof(byte));
+
+ /* Wait at least 10ms: fake wait_until_sent - 10 bits at 9600 baud*/
+ msleep(15); /* old ma600 uses 15ms */
+
+#if 1
+ /* read-back of the control byte. ma600 is the first dongle driver
+ * which uses this so there might be some unidentified issues.
+ * Disable this in case of problems with readback.
+ */
+
+ sirdev_raw_read(dev, &byte, sizeof(byte));
+ if (byte != get_control_byte(speed)) {
+ net_warn_ratelimited("%s(): bad control byte read-back %02x != %02x\n",
+ __func__, (unsigned)byte,
+ (unsigned)get_control_byte(speed));
+ return -1;
+ }
+ else
+ pr_debug("%s() control byte write read OK\n", __func__);
+#endif
+
+ /* Set DTR, Set RTS */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Wait at least 10ms */
+ msleep(10);
+
+ /* dongle is now switched to the new speed */
+ dev->speed = speed;
+
+ return 0;
+}
+
+/*
+ * Function ma600_reset (dev)
+ *
+ * This function resets the ma600 dongle.
+ *
+ * Algorithm:
+ * 0. DTR=0, RTS=1 and wait 10 ms
+ * 1. DTR=1, RTS=1 and wait 10 ms
+ * 2. 9600 bps now
+ */
+
+/* total delays are only about 20ms - let's just sleep for now to
+ * avoid the state machine complexity before we get things working
+ */
+
+static int ma600_reset(struct sir_dev *dev)
+{
+ /* Reset the dongle : set DTR low for 10 ms */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+ msleep(10);
+
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ msleep(10);
+
+ dev->speed = 9600; /* That's the dongle-default */
+
+ return 0;
+}
+
+MODULE_AUTHOR("Leung <95Etwl@alumni.ee.ust.hk> http://www.engsvr.ust/~eetwl95");
+MODULE_DESCRIPTION("MA600 dongle driver version 0.1");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-11"); /* IRDA_MA600_DONGLE */
+
+module_init(ma600_sir_init);
+module_exit(ma600_sir_cleanup);
+
diff --git a/drivers/staging/irda/drivers/mcp2120-sir.c b/drivers/staging/irda/drivers/mcp2120-sir.c
new file mode 100644
index 000000000000..2e33f91bfe8f
--- /dev/null
+++ b/drivers/staging/irda/drivers/mcp2120-sir.c
@@ -0,0 +1,224 @@
+/*********************************************************************
+ *
+ *
+ * Filename: mcp2120.c
+ * Version: 1.0
+ * Description: Implementation for the MCP2120 (Microchip)
+ * Status: Experimental.
+ * Author: Felix Tang (tangf@eyetap.org)
+ * Created at: Sun Mar 31 19:32:12 EST 2002
+ * Based on code by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 2002 Felix Tang, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int mcp2120_reset(struct sir_dev *dev);
+static int mcp2120_open(struct sir_dev *dev);
+static int mcp2120_close(struct sir_dev *dev);
+static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed);
+
+#define MCP2120_9600 0x87
+#define MCP2120_19200 0x8B
+#define MCP2120_38400 0x85
+#define MCP2120_57600 0x83
+#define MCP2120_115200 0x81
+
+#define MCP2120_COMMIT 0x11
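+
+/*
+ * For example, switching to 115200 bps writes the two-byte sequence
+ * { MCP2120_115200, MCP2120_COMMIT } = { 0x81, 0x11 } while DTR is set
+ * (command mode); see mcp2120_change_speed() below.
+ */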
+
+static struct dongle_driver mcp2120 = {
+ .owner = THIS_MODULE,
+ .driver_name = "Microchip MCP2120",
+ .type = IRDA_MCP2120_DONGLE,
+ .open = mcp2120_open,
+ .close = mcp2120_close,
+ .reset = mcp2120_reset,
+ .set_speed = mcp2120_change_speed,
+};
+
+static int __init mcp2120_sir_init(void)
+{
+ return irda_register_dongle(&mcp2120);
+}
+
+static void __exit mcp2120_sir_cleanup(void)
+{
+ irda_unregister_dongle(&mcp2120);
+}
+
+static int mcp2120_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ /* it seems no explicit power-on is required here, and reset switches it on anyway */
+
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x01;
+ irda_qos_bits_to_value(qos);
+
+ return 0;
+}
+
+static int mcp2120_close(struct sir_dev *dev)
+{
+ /* Power off dongle */
+ /* reset and inhibit mcp2120 */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ // sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function mcp2120_change_speed (dev, speed)
+ *
+ * Set the speed for the MCP2120.
+ *
+ */
+
+#define MCP2120_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED+1)
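+
+/*
+ * Note on the flow below (summary, not from the original comments): the speed
+ * change runs in two passes. The first pass (SIRDEV_STATE_DONGLE_SPEED)
+ * enters command mode, writes the control bytes and returns a positive delay,
+ * which the sir-dev state machine presumably uses to call back later; the
+ * second pass (MCP2120_STATE_WAIT_SPEED) drops DTR/RTS to leave command mode.
+ */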
+
+static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ u8 control[2];
+ static int ret = 0;
+
+ switch (state) {
+ case SIRDEV_STATE_DONGLE_SPEED:
+ /* Set DTR to enter command mode */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+ udelay(500);
+
+ ret = 0;
+ switch (speed) {
+ default:
+ speed = 9600;
+ ret = -EINVAL;
+ /* fall through */
+ case 9600:
+ control[0] = MCP2120_9600;
+ //printk("mcp2120 9600\n");
+ break;
+ case 19200:
+ control[0] = MCP2120_19200;
+ //printk("mcp2120 19200\n");
+ break;
+ case 38400:
+ control[0] = MCP2120_38400;
+ //printk("mcp2120 38400\n");
+ break;
+ case 57600:
+ control[0] = MCP2120_57600;
+ //printk("mcp2120 57600\n");
+ break;
+ case 115200:
+ control[0] = MCP2120_115200;
+ //printk("mcp2120 115200\n");
+ break;
+ }
+ control[1] = MCP2120_COMMIT;
+
+ /* Write control bytes */
+ sirdev_raw_write(dev, control, 2);
+ dev->speed = speed;
+
+ state = MCP2120_STATE_WAIT_SPEED;
+ delay = 100;
+ //printk("mcp2120_change_speed: dongle_speed\n");
+ break;
+
+ case MCP2120_STATE_WAIT_SPEED:
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+ //printk("mcp2120_change_speed: mcp_wait\n");
+ break;
+
+ default:
+ net_err_ratelimited("%s(), undefined state %d\n",
+ __func__, state);
+ ret = -EINVAL;
+ break;
+ }
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+/*
+ * Function mcp2120_reset (driver)
+ *
+ * This function resets the mcp2120 dongle.
+ *
+ * Info: -set RTS to reset mcp2120
+ * -set DTR to set mcp2120 software command mode
+ * -mcp2120 defaults to 9600 baud after reset
+ *
+ * Algorithm:
+ * 0. Set RTS to reset mcp2120.
+ * 1. Clear RTS and wait for device reset timer of 30 ms (max).
+ *
+ */
+
+#define MCP2120_STATE_WAIT1_RESET (SIRDEV_STATE_DONGLE_RESET+1)
+#define MCP2120_STATE_WAIT2_RESET (SIRDEV_STATE_DONGLE_RESET+2)
+
+static int mcp2120_reset(struct sir_dev *dev)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ int ret = 0;
+
+ switch (state) {
+ case SIRDEV_STATE_DONGLE_RESET:
+ //printk("mcp2120_reset: dongle_reset\n");
+ /* Reset dongle by setting RTS */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ state = MCP2120_STATE_WAIT1_RESET;
+ delay = 50;
+ break;
+
+ case MCP2120_STATE_WAIT1_RESET:
+ //printk("mcp2120_reset: mcp2120_wait1\n");
+ /* clear RTS and wait for at least 30 ms. */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+ state = MCP2120_STATE_WAIT2_RESET;
+ delay = 50;
+ break;
+
+ case MCP2120_STATE_WAIT2_RESET:
+ //printk("mcp2120_reset mcp2120_wait2\n");
+ /* Go back to normal mode */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+ break;
+
+ default:
+ net_err_ratelimited("%s(), undefined state %d\n",
+ __func__, state);
+ ret = -EINVAL;
+ break;
+ }
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+MODULE_AUTHOR("Felix Tang <tangf@eyetap.org>");
+MODULE_DESCRIPTION("Microchip MCP2120");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-9"); /* IRDA_MCP2120_DONGLE */
+
+module_init(mcp2120_sir_init);
+module_exit(mcp2120_sir_cleanup);
diff --git a/drivers/staging/irda/drivers/mcs7780.c b/drivers/staging/irda/drivers/mcs7780.c
new file mode 100644
index 000000000000..c3f0b254b344
--- /dev/null
+++ b/drivers/staging/irda/drivers/mcs7780.c
@@ -0,0 +1,987 @@
+/*****************************************************************************
+*
+* Filename: mcs7780.c
+* Version: 0.4-alpha
+* Description: Irda MosChip USB Dongle Driver
+* Authors: Lukasz Stelmach <stlman@poczta.fm>
+* Brian Pugh <bpugh@cs.pdx.edu>
+* Judy Fischbach <jfisch@cs.pdx.edu>
+*
+* Based on stir4200 driver, but some things done differently.
+* Based on earlier driver by Paul Stewart <stewart@parc.com>
+*
+* Copyright (C) 2000, Roman Weissgaerber <weissg@vienna.at>
+* Copyright (C) 2001, Dag Brattli <dag@brattli.net>
+* Copyright (C) 2001, Jean Tourrilhes <jt@hpl.hp.com>
+* Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
+* Copyright (C) 2005, Lukasz Stelmach <stlman@poczta.fm>
+* Copyright (C) 2005, Brian Pugh <bpugh@cs.pdx.edu>
+* Copyright (C) 2005, Judy Fischbach <jfisch@cs.pdx.edu>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+*****************************************************************************/
+
+/*
+ * MCS7780 is a simple USB to IrDA bridge by MosChip. It is neither
+ * compatible with irda-usb nor with stir4200, although it is quite
+ * similar to the latter as far as the general idea of operation is
+ * concerned. That is, it requires the software to do all the framing work
+ * at SIR speeds, while the hardware takes care of the framing at MIR and
+ * FIR speeds. It supports all speeds from 2400 bps through 4 Mbps.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/device.h>
+#include <linux/crc32.h>
+
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+#include <linux/uaccess.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/crc.h>
+
+#include "mcs7780.h"
+
+#define MCS_VENDOR_ID 0x9710
+#define MCS_PRODUCT_ID 0x7780
+
+static const struct usb_device_id mcs_table[] = {
+ /* MosChip Corp., MCS7780 FIR-USB Adapter */
+ {USB_DEVICE(MCS_VENDOR_ID, MCS_PRODUCT_ID)},
+ {},
+};
+
+MODULE_AUTHOR("Brian Pugh <bpugh@cs.pdx.edu>");
+MODULE_DESCRIPTION("IrDA-USB Dongle Driver for MosChip MCS7780");
+MODULE_VERSION("0.3alpha");
+MODULE_LICENSE("GPL");
+
+MODULE_DEVICE_TABLE(usb, mcs_table);
+
+static int qos_mtt_bits = 0x07 /* > 1ms */ ;
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
+
+static int receive_mode = 0x1;
+module_param(receive_mode, int, 0);
+MODULE_PARM_DESC(receive_mode,
+ "Receive mode of the device (1:fast, 0:slow, default:1)");
+
+static int sir_tweak = 1;
+module_param(sir_tweak, int, 0444);
+MODULE_PARM_DESC(sir_tweak,
+ "Default pulse width (1:1.6us, 0:3/16 bit, default:1).");
+
+static int transceiver_type = MCS_TSC_VISHAY;
+module_param(transceiver_type, int, 0444);
+MODULE_PARM_DESC(transceiver_type, "IR transceiver type, see mcs7780.h.");
+
+static struct usb_driver mcs_driver = {
+ .name = "mcs7780",
+ .probe = mcs_probe,
+ .disconnect = mcs_disconnect,
+ .id_table = mcs_table,
+};
+
+/* speed flag selection by direct addressing.
+addr = (speed >> 8) & 0x0f
+
+0x1 57600 0x2 115200 0x4 1152000 0x5 9600
+0x6 38400 0x9 2400 0xa 576000 0xb 19200
+
+4Mbps (or 2400) must be checked separately. Since it also has
+to be programmed in a different manner, that is not a big problem.
+*/
+static __u16 mcs_speed_set[16] = { 0,
+ MCS_SPEED_57600,
+ MCS_SPEED_115200,
+ 0,
+ MCS_SPEED_1152000,
+ MCS_SPEED_9600,
+ MCS_SPEED_38400,
+ 0, 0,
+ MCS_SPEED_2400,
+ MCS_SPEED_576000,
+ MCS_SPEED_19200,
+ 0, 0, 0,
+};
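+
+/*
+ * Worked example of the addressing scheme above: 115200 is 0x1c200, so
+ * (115200 >> 8) & 0x0f == 0x2 and mcs_speed_set[2] is MCS_SPEED_115200;
+ * 9600 maps to index 0x5. 2400 and 4000000 both map to index 0x9, which is
+ * why they have to be handled separately.
+ */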
+
+/* Set given 16 bit register with a 16 bit value. Send control message
+ * to set dongle register. */
+static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
+{
+ struct usb_device *dev = mcs->usbdev;
+ return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ,
+ MCS_WR_RTYPE, val, reg, NULL, 0,
+ msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+}
+
+/* Get 16 bit register value. Send control message to read dongle register. */
+static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
+{
+ struct usb_device *dev = mcs->usbdev;
+ void *dmabuf;
+ int ret;
+
+ dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
+ MCS_RD_RTYPE, 0, reg, dmabuf, 2,
+ msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+
+ memcpy(val, dmabuf, sizeof(__u16));
+ kfree(dmabuf);
+
+ return ret;
+}
+
+/* Setup a communication between mcs7780 and TFDU chips. It is described
+ * in more detail in the data sheet. The setup sequence puts the
+ * Vishay transceiver into high speed mode. It will also receive SIR speed
+ * packets but at reduced sensitivity.
+ */
+
+/* 0: OK 1:ERROR */
+static inline int mcs_setup_transceiver_vishay(struct mcs_cb *mcs)
+{
+ int ret = 0;
+ __u16 rval;
+
+ /* mcs_get_reg should read exactly two bytes from the dongle */
+ ret = mcs_get_reg(mcs, MCS_XCVR_REG, &rval);
+ if (unlikely(ret != 2)) {
+ ret = -EIO;
+ goto error;
+ }
+
+ /* The MCS_XCVR_CONF bit puts the transceiver into configuration
+ * mode. The MCS_MODE0 bit must start out high (1) and then
+ * transition to low and the MCS_STFIR and MCS_MODE1 bits must
+ * be low.
+ */
+ rval |= (MCS_MODE0 | MCS_XCVR_CONF);
+ rval &= ~MCS_STFIR;
+ rval &= ~MCS_MODE1;
+ ret = mcs_set_reg(mcs, MCS_XCVR_REG, rval);
+ if (unlikely(ret))
+ goto error;
+
+ rval &= ~MCS_MODE0;
+ ret = mcs_set_reg(mcs, MCS_XCVR_REG, rval);
+ if (unlikely(ret))
+ goto error;
+
+ rval &= ~MCS_XCVR_CONF;
+ ret = mcs_set_reg(mcs, MCS_XCVR_REG, rval);
+ if (unlikely(ret))
+ goto error;
+
+ ret = 0;
+error:
+ return ret;
+}
+
+/* Setup a communication between mcs7780 and agilent chip. */
+static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs)
+{
+ net_warn_ratelimited("This transceiver type is not supported yet\n");
+ return 1;
+}
+
+/* Setup a communication between mcs7780 and sharp chip. */
+static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs)
+{
+ net_warn_ratelimited("This transceiver type is not supported yet\n");
+ return 1;
+}
+
+/* Common setup for all transceivers */
+static inline int mcs_setup_transceiver(struct mcs_cb *mcs)
+{
+ int ret = 0;
+ __u16 rval;
+ const char *msg;
+
+ msg = "Basic transceiver setup error";
+
+ /* read value of MODE Register, set the DRIVER and RESET bits
+ * and write value back out to MODE Register
+ */
+ ret = mcs_get_reg(mcs, MCS_MODE_REG, &rval);
+ if(unlikely(ret != 2))
+ goto error;
+ rval |= MCS_DRIVER; /* put the mcs7780 into configuration mode. */
+ ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
+ if(unlikely(ret))
+ goto error;
+
+ rval = 0; /* set min pulse width to 0 initially. */
+ ret = mcs_set_reg(mcs, MCS_MINRXPW_REG, rval);
+ if(unlikely(ret))
+ goto error;
+
+ ret = mcs_get_reg(mcs, MCS_MODE_REG, &rval);
+ if(unlikely(ret != 2))
+ goto error;
+
+ rval &= ~MCS_FIR; /* turn off fir mode. */
+ if(mcs->sir_tweak)
+ rval |= MCS_SIR16US; /* 1.6us pulse width */
+ else
+ rval &= ~MCS_SIR16US; /* 3/16 bit time pulse width */
+
+ /* make sure ask mode and back to back packets are off. */
+ rval &= ~(MCS_BBTG | MCS_ASK);
+
+ rval &= ~MCS_SPEED_MASK;
+ rval |= MCS_SPEED_9600; /* make sure initial speed is 9600. */
+ mcs->speed = 9600;
+ mcs->new_speed = 0; /* new_speed is set to 0 */
+ rval &= ~MCS_PLLPWDN; /* disable power down. */
+
+ /* make sure the device determines direction and that automatic SIP
+ * pulse sending is on.
+ */
+ rval |= MCS_DTD | MCS_SIPEN;
+
+ ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
+ if(unlikely(ret))
+ goto error;
+
+ msg = "transceiver model specific setup error";
+ switch (mcs->transceiver_type) {
+ case MCS_TSC_VISHAY:
+ ret = mcs_setup_transceiver_vishay(mcs);
+ break;
+
+ case MCS_TSC_SHARP:
+ ret = mcs_setup_transceiver_sharp(mcs);
+ break;
+
+ case MCS_TSC_AGILENT:
+ ret = mcs_setup_transceiver_agilent(mcs);
+ break;
+
+ default:
+ net_warn_ratelimited("Unknown transceiver type: %d\n",
+ mcs->transceiver_type);
+ ret = 1;
+ }
+ if (unlikely(ret))
+ goto error;
+
+ /* If the transceiver is not SHARP, set the RXFAST bit in the XCVR
+ * register when receive mode is enabled, otherwise clear it
+ */
+ if (mcs->transceiver_type != MCS_TSC_SHARP) {
+
+ ret = mcs_get_reg(mcs, MCS_XCVR_REG, &rval);
+ if (unlikely(ret != 2))
+ goto error;
+ if (mcs->receive_mode)
+ rval |= MCS_RXFAST;
+ else
+ rval &= ~MCS_RXFAST;
+ ret = mcs_set_reg(mcs, MCS_XCVR_REG, rval);
+ if (unlikely(ret))
+ goto error;
+ }
+
+ msg = "transceiver reset";
+
+ ret = mcs_get_reg(mcs, MCS_MODE_REG, &rval);
+ if (unlikely(ret != 2))
+ goto error;
+
+ /* reset the mcs7780 so all changes take effect. */
+ rval &= ~MCS_RESET;
+ ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
+ if (unlikely(ret))
+ goto error;
+ else
+ return ret;
+
+error:
+ net_err_ratelimited("%s\n", msg);
+ return ret;
+}
+
+/* Wraps the data in format for SIR */
+static inline int mcs_wrap_sir_skb(struct sk_buff *skb, __u8 * buf)
+{
+ int wraplen;
+
+ /* 2: full frame length, including "the length" */
+ wraplen = async_wrap_skb(skb, buf + 2, 4094);
+
+ wraplen += 2;
+ buf[0] = wraplen & 0xff;
+ buf[1] = (wraplen >> 8) & 0xff;
+
+ return wraplen;
+}
+
+/* Wraps the data in format for FIR */
+static unsigned mcs_wrap_fir_skb(const struct sk_buff *skb, __u8 *buf)
+{
+ unsigned int len = 0;
+ __u32 fcs = ~(crc32_le(~0, skb->data, skb->len));
+
+ /* add 2 bytes for length value and 4 bytes for fcs. */
+ len = skb->len + 6;
+
+ /* The mcs7780 requires that the first two bytes are the packet
+ * length in little endian order. Note: the length value includes
+ * the two bytes for the length value itself.
+ */
+ buf[0] = len & 0xff;
+ buf[1] = (len >> 8) & 0xff;
+ /* copy the data into the tx buffer. */
+ skb_copy_from_linear_data(skb, buf + 2, skb->len);
+ /* put the fcs in the last four bytes in little endian order. */
+ buf[len - 4] = fcs & 0xff;
+ buf[len - 3] = (fcs >> 8) & 0xff;
+ buf[len - 2] = (fcs >> 16) & 0xff;
+ buf[len - 1] = (fcs >> 24) & 0xff;
+
+ return len;
+}
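+
+/*
+ * Layout produced by mcs_wrap_fir_skb() above, for an n-byte IrDA frame
+ * (n + 6 bytes total): bytes 0-1 hold the total length (little endian,
+ * including these two bytes), bytes 2..n+1 hold the frame data, and the
+ * last four bytes hold ~crc32_le over the data, least significant byte
+ * first.
+ */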
+
+/* Wraps the data in format for MIR */
+static unsigned mcs_wrap_mir_skb(const struct sk_buff *skb, __u8 *buf)
+{
+ __u16 fcs = 0;
+ int len = skb->len + 4;
+
+ fcs = ~(irda_calc_crc16(~fcs, skb->data, skb->len));
+ /* put the total packet length in first. Note: packet length
+ * value includes the two bytes that hold the packet length
+ * itself.
+ */
+ buf[0] = len & 0xff;
+ buf[1] = (len >> 8) & 0xff;
+ /* copy the data */
+ skb_copy_from_linear_data(skb, buf + 2, skb->len);
+ /* put the fcs in last two bytes in little endian order. */
+ buf[len - 2] = fcs & 0xff;
+ buf[len - 1] = (fcs >> 8) & 0xff;
+
+ return len;
+}
+
+/* Unwrap received packets at MIR speed. A 16 bit crc_ccitt checksum is
+ * used for the fcs. When performed over the entire packet the result
+ * should be GOOD_FCS = 0xf0b8. Hands the unwrapped data off to the IrDA
+ * layer via a sk_buff.
+ */
+static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
+{
+ __u16 fcs;
+ int new_len;
+ struct sk_buff *skb;
+
+ /* Assume that the frames are going to fill a single packet
+ * rather than span multiple packets.
+ */
+
+ new_len = len - 2;
+ if(unlikely(new_len <= 0)) {
+ net_err_ratelimited("%s short frame length %d\n",
+ mcs->netdev->name, new_len);
+ ++mcs->netdev->stats.rx_errors;
+ ++mcs->netdev->stats.rx_length_errors;
+ return;
+ }
+ fcs = 0;
+ fcs = irda_calc_crc16(~fcs, buf, len);
+
+ if(fcs != GOOD_FCS) {
+ net_err_ratelimited("crc error calc 0x%x len %d\n",
+ fcs, new_len);
+ mcs->netdev->stats.rx_errors++;
+ mcs->netdev->stats.rx_crc_errors++;
+ return;
+ }
+
+ skb = dev_alloc_skb(new_len + 1);
+ if(unlikely(!skb)) {
+ ++mcs->netdev->stats.rx_dropped;
+ return;
+ }
+
+ skb_reserve(skb, 1);
+ skb_copy_to_linear_data(skb, buf, new_len);
+ skb_put(skb, new_len);
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ skb->dev = mcs->netdev;
+
+ netif_rx(skb);
+
+ mcs->netdev->stats.rx_packets++;
+ mcs->netdev->stats.rx_bytes += new_len;
+}
+
+/* Unwrap received packets at FIR speed. A 32 bit crc_ccitt checksum is
+ * used for the fcs. Hands the unwrapped data off to the IrDA
+ * layer via a sk_buff.
+ */
+static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
+{
+ __u32 fcs;
+ int new_len;
+ struct sk_buff *skb;
+
+ /* Assume that the frames are going to fill a single packet
+ * rather than span multiple packets. This is most likely a false
+ * assumption.
+ */
+
+ new_len = len - 4;
+ if(unlikely(new_len <= 0)) {
+ net_err_ratelimited("%s short frame length %d\n",
+ mcs->netdev->name, new_len);
+ ++mcs->netdev->stats.rx_errors;
+ ++mcs->netdev->stats.rx_length_errors;
+ return;
+ }
+
+ fcs = ~(crc32_le(~0, buf, new_len));
+ if(fcs != get_unaligned_le32(buf + new_len)) {
+ net_err_ratelimited("crc error calc 0x%x len %d\n",
+ fcs, new_len);
+ mcs->netdev->stats.rx_errors++;
+ mcs->netdev->stats.rx_crc_errors++;
+ return;
+ }
+
+ skb = dev_alloc_skb(new_len + 1);
+ if(unlikely(!skb)) {
+ ++mcs->netdev->stats.rx_dropped;
+ return;
+ }
+
+ skb_reserve(skb, 1);
+ skb_copy_to_linear_data(skb, buf, new_len);
+ skb_put(skb, new_len);
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ skb->dev = mcs->netdev;
+
+ netif_rx(skb);
+
+ mcs->netdev->stats.rx_packets++;
+ mcs->netdev->stats.rx_bytes += new_len;
+}
+
+
+/* Allocates urbs for both receive and transmit.
+ * Returns 0 if an allocation fails and 1 on success.
+ */
+static inline int mcs_setup_urbs(struct mcs_cb *mcs)
+{
+ mcs->rx_urb = NULL;
+
+ mcs->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!mcs->tx_urb)
+ return 0;
+
+ mcs->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!mcs->rx_urb) {
+ usb_free_urb(mcs->tx_urb);
+ mcs->tx_urb = NULL;
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Sets up state to be initially outside frame, gets receive urb,
+ * sets status to successful and then submits the urb to start
+ * receiving the data.
+ */
+static inline int mcs_receive_start(struct mcs_cb *mcs)
+{
+ mcs->rx_buff.in_frame = FALSE;
+ mcs->rx_buff.state = OUTSIDE_FRAME;
+
+ usb_fill_bulk_urb(mcs->rx_urb, mcs->usbdev,
+ usb_rcvbulkpipe(mcs->usbdev, mcs->ep_in),
+ mcs->in_buf, 4096, mcs_receive_irq, mcs);
+
+ mcs->rx_urb->status = 0;
+ return usb_submit_urb(mcs->rx_urb, GFP_KERNEL);
+}
+
+/* Finds the in and out endpoints for the mcs control block */
+static inline int mcs_find_endpoints(struct mcs_cb *mcs,
+ struct usb_host_endpoint *ep, int epnum)
+{
+ int i;
+ int ret = 0;
+
+ /* If no place to store the endpoints just return */
+ if (!ep)
+ return ret;
+
+ /* cycle through the endpoints, storing the first IN and first OUT found */
+ for (i = 0; i < epnum; i++) {
+ if (ep[i].desc.bEndpointAddress & USB_DIR_IN)
+ mcs->ep_in = ep[i].desc.bEndpointAddress;
+ else
+ mcs->ep_out = ep[i].desc.bEndpointAddress;
+
+ /* MosChip says that the chip has only two bulk
+ * endpoints. Find one for each direction and move on.
+ */
+ if ((mcs->ep_in != 0) && (mcs->ep_out != 0)) {
+ ret = 1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void mcs_speed_work(struct work_struct *work)
+{
+ struct mcs_cb *mcs = container_of(work, struct mcs_cb, work);
+ struct net_device *netdev = mcs->netdev;
+
+ mcs_speed_change(mcs);
+ netif_wake_queue(netdev);
+}
+
+/* Function to change the speed of the mcs7780. Fully supports SIR,
+ * MIR, and FIR speeds.
+ */
+static int mcs_speed_change(struct mcs_cb *mcs)
+{
+ int ret = 0;
+ int rst = 0;
+ int cnt = 0;
+ __u16 nspeed;
+ __u16 rval;
+
+ nspeed = mcs_speed_set[(mcs->new_speed >> 8) & 0x0f];
+
+ do {
+ mcs_get_reg(mcs, MCS_RESV_REG, &rval);
+ } while(cnt++ < 100 && (rval & MCS_IRINTX));
+
+ if (cnt > 100) {
+ net_err_ratelimited("unable to change speed\n");
+ ret = -EIO;
+ goto error;
+ }
+
+ mcs_get_reg(mcs, MCS_MODE_REG, &rval);
+
+ /* MINRXPW values recommended by MosChip */
+ if (mcs->new_speed <= 115200) {
+ rval &= ~MCS_FIR;
+
+ if ((rst = (mcs->speed > 115200)))
+ mcs_set_reg(mcs, MCS_MINRXPW_REG, 0);
+
+ } else if (mcs->new_speed <= 1152000) {
+ rval &= ~MCS_FIR;
+
+ if ((rst = !(mcs->speed == 576000 || mcs->speed == 1152000)))
+ mcs_set_reg(mcs, MCS_MINRXPW_REG, 5);
+
+ } else {
+ rval |= MCS_FIR;
+
+ if ((rst = (mcs->speed != 4000000)))
+ mcs_set_reg(mcs, MCS_MINRXPW_REG, 5);
+
+ }
+
+ rval &= ~MCS_SPEED_MASK;
+ rval |= nspeed;
+
+ ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
+ if (unlikely(ret))
+ goto error;
+
+ if (rst)
+ switch (mcs->transceiver_type) {
+ case MCS_TSC_VISHAY:
+ ret = mcs_setup_transceiver_vishay(mcs);
+ break;
+
+ case MCS_TSC_SHARP:
+ ret = mcs_setup_transceiver_sharp(mcs);
+ break;
+
+ case MCS_TSC_AGILENT:
+ ret = mcs_setup_transceiver_agilent(mcs);
+ break;
+
+ default:
+ ret = 1;
+ net_warn_ratelimited("Unknown transceiver type: %d\n",
+ mcs->transceiver_type);
+ }
+ if (unlikely(ret))
+ goto error;
+
+ mcs_get_reg(mcs, MCS_MODE_REG, &rval);
+ rval &= ~MCS_RESET;
+ ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
+
+ mcs->speed = mcs->new_speed;
+error:
+ mcs->new_speed = 0;
+ return ret;
+}
+
+/* Ioctl calls not supported at this time. Can be an area of future work. */
+static int mcs_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ /* struct if_irda_req *irq = (struct if_irda_req *)rq; */
+ /* struct mcs_cb *mcs = netdev_priv(netdev); */
+ int ret = 0;
+
+ switch (cmd) {
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+/* Network device is taken down, done by "ifconfig irda0 down" */
+static int mcs_net_close(struct net_device *netdev)
+{
+ int ret = 0;
+ struct mcs_cb *mcs = netdev_priv(netdev);
+
+ /* Stop transmit processing */
+ netif_stop_queue(netdev);
+
+ kfree_skb(mcs->rx_buff.skb);
+
+ /* kill and free the receive and transmit URBs */
+ usb_kill_urb(mcs->rx_urb);
+ usb_free_urb(mcs->rx_urb);
+ usb_kill_urb(mcs->tx_urb);
+ usb_free_urb(mcs->tx_urb);
+
+ /* Stop and remove instance of IrLAP */
+ if (mcs->irlap)
+ irlap_close(mcs->irlap);
+
+ mcs->irlap = NULL;
+ return ret;
+}
+
+/* Network device is taken up, done by "ifconfig irda0 up" */
+static int mcs_net_open(struct net_device *netdev)
+{
+ struct mcs_cb *mcs = netdev_priv(netdev);
+ char hwname[16];
+ int ret = 0;
+
+	ret = usb_clear_halt(mcs->usbdev,
+			     usb_rcvbulkpipe(mcs->usbdev, mcs->ep_in));
+	if (ret)
+		goto error1;
+	ret = usb_clear_halt(mcs->usbdev,
+			     usb_sndbulkpipe(mcs->usbdev, mcs->ep_out));
+ if (ret)
+ goto error1;
+
+ ret = mcs_setup_transceiver(mcs);
+ if (ret)
+ goto error1;
+
+ ret = -ENOMEM;
+
+ /* Initialize for SIR/FIR to copy data directly into skb. */
+ mcs->receiving = 0;
+ mcs->rx_buff.truesize = IRDA_SKB_MAX_MTU;
+ mcs->rx_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
+ if (!mcs->rx_buff.skb)
+ goto error1;
+
+ skb_reserve(mcs->rx_buff.skb, 1);
+ mcs->rx_buff.head = mcs->rx_buff.skb->data;
+
+ /*
+ * Now that everything should be initialized properly,
+ * Open new IrLAP layer instance to take care of us...
+	 * Note : it will immediately send a speed change...
+ */
+ sprintf(hwname, "usb#%d", mcs->usbdev->devnum);
+ mcs->irlap = irlap_open(netdev, &mcs->qos, hwname);
+ if (!mcs->irlap) {
+ net_err_ratelimited("mcs7780: irlap_open failed\n");
+ goto error2;
+ }
+
+ if (!mcs_setup_urbs(mcs))
+ goto error3;
+
+ ret = mcs_receive_start(mcs);
+ if (ret)
+ goto error4;
+
+ netif_start_queue(netdev);
+ return 0;
+
+error4:
+ usb_free_urb(mcs->rx_urb);
+ usb_free_urb(mcs->tx_urb);
+error3:
+ irlap_close(mcs->irlap);
+error2:
+ kfree_skb(mcs->rx_buff.skb);
+error1:
+ return ret;
+}
+
+/* Receive callback function. */
+static void mcs_receive_irq(struct urb *urb)
+{
+ __u8 *bytes;
+ struct mcs_cb *mcs = urb->context;
+ int i;
+ int ret;
+
+ if (!netif_running(mcs->netdev))
+ return;
+
+ if (urb->status)
+ return;
+
+ if (urb->actual_length > 0) {
+ bytes = urb->transfer_buffer;
+
+ /* MCS returns frames without BOF and EOF
+ * I assume it returns whole frames.
+ */
+ /* SIR speed */
+		if (mcs->speed < 576000) {
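+			/* Re-insert the BOF (0xc0) and EOF (0xc1) framing bytes that the chip strips */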
+ async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
+ &mcs->rx_buff, 0xc0);
+
+ for (i = 0; i < urb->actual_length; i++)
+ async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
+ &mcs->rx_buff, bytes[i]);
+
+ async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
+ &mcs->rx_buff, 0xc1);
+ }
+ /* MIR speed */
+		else if (mcs->speed == 576000 || mcs->speed == 1152000) {
+ mcs_unwrap_mir(mcs, urb->transfer_buffer,
+ urb->actual_length);
+ }
+ /* FIR speed */
+ else {
+ mcs_unwrap_fir(mcs, urb->transfer_buffer,
+ urb->actual_length);
+ }
+ }
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+/* Transmit callback function. */
+static void mcs_send_irq(struct urb *urb)
+{
+ struct mcs_cb *mcs = urb->context;
+ struct net_device *ndev = mcs->netdev;
+
+ if (unlikely(mcs->new_speed))
+ schedule_work(&mcs->work);
+ else
+ netif_wake_queue(ndev);
+}
+
+/* Hard transmit function (ndo_start_xmit): wrap the frame and submit the tx URB. */
+static netdev_tx_t mcs_hard_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ unsigned long flags;
+ struct mcs_cb *mcs;
+ int wraplen;
+ int ret = 0;
+
+ netif_stop_queue(ndev);
+ mcs = netdev_priv(ndev);
+
+ spin_lock_irqsave(&mcs->lock, flags);
+
+ mcs->new_speed = irda_get_next_speed(skb);
+ if (likely(mcs->new_speed == mcs->speed))
+ mcs->new_speed = 0;
+
+ /* SIR speed */
+	if (mcs->speed < 576000) {
+ wraplen = mcs_wrap_sir_skb(skb, mcs->out_buf);
+ }
+ /* MIR speed */
+	else if (mcs->speed == 576000 || mcs->speed == 1152000) {
+ wraplen = mcs_wrap_mir_skb(skb, mcs->out_buf);
+ }
+ /* FIR speed */
+ else {
+ wraplen = mcs_wrap_fir_skb(skb, mcs->out_buf);
+ }
+ usb_fill_bulk_urb(mcs->tx_urb, mcs->usbdev,
+ usb_sndbulkpipe(mcs->usbdev, mcs->ep_out),
+ mcs->out_buf, wraplen, mcs_send_irq, mcs);
+
+ if ((ret = usb_submit_urb(mcs->tx_urb, GFP_ATOMIC))) {
+ net_err_ratelimited("failed tx_urb: %d\n", ret);
+ switch (ret) {
+ case -ENODEV:
+ case -EPIPE:
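+			/* device gone or endpoint stalled: leave the queue stopped */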
+ break;
+ default:
+ mcs->netdev->stats.tx_errors++;
+ netif_start_queue(ndev);
+ }
+ } else {
+ mcs->netdev->stats.tx_packets++;
+ mcs->netdev->stats.tx_bytes += skb->len;
+ }
+
+ dev_kfree_skb(skb);
+ spin_unlock_irqrestore(&mcs->lock, flags);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops mcs_netdev_ops = {
+ .ndo_open = mcs_net_open,
+ .ndo_stop = mcs_net_close,
+ .ndo_start_xmit = mcs_hard_xmit,
+ .ndo_do_ioctl = mcs_net_ioctl,
+};
+
+/*
+ * This function is called by the USB subsystem for each new device in the
+ * system. We need to verify the device, and if it is one we support, start handling it.
+ */
+static int mcs_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct net_device *ndev = NULL;
+ struct mcs_cb *mcs;
+ int ret = -ENOMEM;
+
+ ndev = alloc_irdadev(sizeof(*mcs));
+ if (!ndev)
+ goto error1;
+
+ pr_debug("MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum);
+
+ SET_NETDEV_DEV(ndev, &intf->dev);
+
+ ret = usb_reset_configuration(udev);
+ if (ret != 0) {
+ net_err_ratelimited("mcs7780: usb reset configuration failed\n");
+ goto error2;
+ }
+
+ mcs = netdev_priv(ndev);
+ mcs->usbdev = udev;
+ mcs->netdev = ndev;
+ spin_lock_init(&mcs->lock);
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&mcs->qos);
+
+ /* That's the Rx capability. */
+ mcs->qos.baud_rate.bits &=
+ IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200
+ | IR_576000 | IR_1152000 | (IR_4000000 << 8);
+
+ mcs->qos.min_turn_time.bits &= qos_mtt_bits;
+ irda_qos_bits_to_value(&mcs->qos);
+
+	/* Speed change work initialisation */
+ INIT_WORK(&mcs->work, mcs_speed_work);
+
+ ndev->netdev_ops = &mcs_netdev_ops;
+
+ if (!intf->cur_altsetting) {
+ ret = -ENOMEM;
+ goto error2;
+ }
+
+ ret = mcs_find_endpoints(mcs, intf->cur_altsetting->endpoint,
+ intf->cur_altsetting->desc.bNumEndpoints);
+ if (!ret) {
+ ret = -ENODEV;
+ goto error2;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret != 0)
+ goto error2;
+
+ pr_debug("IrDA: Registered MosChip MCS7780 device as %s\n",
+ ndev->name);
+
+ mcs->transceiver_type = transceiver_type;
+ mcs->sir_tweak = sir_tweak;
+ mcs->receive_mode = receive_mode;
+
+ usb_set_intfdata(intf, mcs);
+ return 0;
+
+error2:
+ free_netdev(ndev);
+
+error1:
+ return ret;
+}
+
+/* The current device is removed, the USB layer tells us to shut down. */
+static void mcs_disconnect(struct usb_interface *intf)
+{
+ struct mcs_cb *mcs = usb_get_intfdata(intf);
+
+ if (!mcs)
+ return;
+
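+	/* make sure any pending speed-change work has finished before tearing down */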
+ cancel_work_sync(&mcs->work);
+
+ unregister_netdev(mcs->netdev);
+ free_netdev(mcs->netdev);
+
+ usb_set_intfdata(intf, NULL);
+ pr_debug("MCS7780 now disconnected.\n");
+}
+
+module_usb_driver(mcs_driver);
diff --git a/drivers/staging/irda/drivers/mcs7780.h b/drivers/staging/irda/drivers/mcs7780.h
new file mode 100644
index 000000000000..a6e8f7dbafc9
--- /dev/null
+++ b/drivers/staging/irda/drivers/mcs7780.h
@@ -0,0 +1,165 @@
+/*****************************************************************************
+*
+* Filename: mcs7780.h
+* Version: 0.2-alpha
+* Description: IrDA MosChip USB Dongle
+* Status: Experimental
+* Authors: Lukasz Stelmach <stlman@poczta.fm>
+* Brian Pugh <bpugh@cs.pdx.edu>
+*
+* Copyright (C) 2005, Lukasz Stelmach <stlman@poczta.fm>
+* Copyright (C) 2005, Brian Pugh <bpugh@cs.pdx.edu>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+*****************************************************************************/
+#ifndef _MCS7780_H
+#define _MCS7780_H
+
+#define MCS_MODE_SIR 0
+#define MCS_MODE_MIR 1
+#define MCS_MODE_FIR 2
+
+#define MCS_CTRL_TIMEOUT 500
+#define MCS_XMIT_TIMEOUT 500
+/* Possible transceiver types */
+#define MCS_TSC_VISHAY 0 /* Vishay TFD, default choice */
+#define MCS_TSC_AGILENT 1 /* Agilent 3602/3600 */
+#define MCS_TSC_SHARP 2 /* Sharp GP2W1000YP */
+
+/* Requests */
+#define MCS_RD_RTYPE 0xC0
+#define MCS_WR_RTYPE 0x40
+#define MCS_RDREQ 0x0F
+#define MCS_WRREQ 0x0E
+
+/* Register 0x00 */
+#define MCS_MODE_REG 0
+#define MCS_FIR ((__u16)0x0001)
+#define MCS_SIR16US ((__u16)0x0002)
+#define MCS_BBTG ((__u16)0x0004)
+#define MCS_ASK ((__u16)0x0008)
+#define MCS_PARITY ((__u16)0x0010)
+
+/* SIR/MIR speed constants */
+#define MCS_SPEED_SHIFT 5
+#define MCS_SPEED_MASK ((__u16)0x00E0)
+#define MCS_SPEED(x) ((x & MCS_SPEED_MASK) >> MCS_SPEED_SHIFT)
+#define MCS_SPEED_2400 ((0 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
+#define MCS_SPEED_9600 ((1 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
+#define MCS_SPEED_19200 ((2 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
+#define MCS_SPEED_38400 ((3 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
+#define MCS_SPEED_57600 ((4 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
+#define MCS_SPEED_115200 ((5 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
+#define MCS_SPEED_576000 ((6 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
+#define MCS_SPEED_1152000 ((7 << MCS_SPEED_SHIFT) & MCS_SPEED_MASK)
+
+#define MCS_PLLPWDN ((__u16)0x0100)
+#define MCS_DRIVER ((__u16)0x0200)
+#define MCS_DTD ((__u16)0x0400)
+#define MCS_DIR ((__u16)0x0800)
+#define MCS_SIPEN ((__u16)0x1000)
+#define MCS_SENDSIP ((__u16)0x2000)
+#define MCS_CHGDIR ((__u16)0x4000)
+#define MCS_RESET ((__u16)0x8000)
+
+/* Register 0x02 */
+#define MCS_XCVR_REG 2
+#define MCS_MODE0 ((__u16)0x0001)
+#define MCS_STFIR ((__u16)0x0002)
+#define MCS_XCVR_CONF ((__u16)0x0004)
+#define MCS_RXFAST ((__u16)0x0008)
+/* TXCUR [6:4] */
+#define MCS_TXCUR_SHIFT 4
+#define MCS_TXCUR_MASK ((__u16)0x0070)
+#define MCS_TXCUR(x) ((x & MCS_TXCUR_MASK) >> MCS_TXCUR_SHIFT)
+#define MCS_SETTXCUR(x,y) \
+ ((x & ~MCS_TXCUR_MASK) | (y << MCS_TXCUR_SHIFT) & MCS_TXCUR_MASK)
+
+#define MCS_MODE1 ((__u16)0x0080)
+#define MCS_SMODE0 ((__u16)0x0100)
+#define MCS_SMODE1 ((__u16)0x0200)
+#define MCS_INVTX ((__u16)0x0400)
+#define MCS_INVRX ((__u16)0x0800)
+
+#define MCS_MINRXPW_REG 4
+
+#define MCS_RESV_REG 7
+#define MCS_IRINTX ((__u16)0x0001)
+#define MCS_IRINRX ((__u16)0x0002)
+
+struct mcs_cb {
+ struct usb_device *usbdev; /* init: probe_irda */
+ struct net_device *netdev; /* network layer */
+	struct irlap_cb *irlap;	/* The link layer we are bound to */
+ struct qos_info qos;
+ unsigned int speed; /* Current speed */
+ unsigned int new_speed; /* new speed */
+
+ struct work_struct work; /* Change speed work */
+
+ struct sk_buff *tx_pending;
+	char in_buf[4096];	/* receive buffer */
+	char out_buf[4096];	/* transmit buffer */
+ __u8 *fifo_status;
+
+ iobuff_t rx_buff; /* receive unwrap state machine */
+ spinlock_t lock;
+ int receiving;
+
+ __u8 ep_in;
+ __u8 ep_out;
+
+ struct urb *rx_urb;
+ struct urb *tx_urb;
+
+ int transceiver_type;
+ int sir_tweak;
+ int receive_mode;
+};
+
+static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val);
+static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val);
+
+static inline int mcs_setup_transceiver_vishay(struct mcs_cb *mcs);
+static inline int mcs_setup_transceiver_agilent(struct mcs_cb *mcs);
+static inline int mcs_setup_transceiver_sharp(struct mcs_cb *mcs);
+static inline int mcs_setup_transceiver(struct mcs_cb *mcs);
+static inline int mcs_wrap_sir_skb(struct sk_buff *skb, __u8 * buf);
+static unsigned mcs_wrap_fir_skb(const struct sk_buff *skb, __u8 *buf);
+static unsigned mcs_wrap_mir_skb(const struct sk_buff *skb, __u8 *buf);
+static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len);
+static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len);
+static inline int mcs_setup_urbs(struct mcs_cb *mcs);
+static inline int mcs_receive_start(struct mcs_cb *mcs);
+static inline int mcs_find_endpoints(struct mcs_cb *mcs,
+ struct usb_host_endpoint *ep, int epnum);
+
+static int mcs_speed_change(struct mcs_cb *mcs);
+
+static int mcs_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd);
+static int mcs_net_close(struct net_device *netdev);
+static int mcs_net_open(struct net_device *netdev);
+
+static void mcs_receive_irq(struct urb *urb);
+static void mcs_send_irq(struct urb *urb);
+static netdev_tx_t mcs_hard_xmit(struct sk_buff *skb,
+ struct net_device *netdev);
+
+static int mcs_probe(struct usb_interface *intf,
+ const struct usb_device_id *id);
+static void mcs_disconnect(struct usb_interface *intf);
+
+#endif /* _MCS7780_H */
diff --git a/drivers/staging/irda/drivers/nsc-ircc.c b/drivers/staging/irda/drivers/nsc-ircc.c
new file mode 100644
index 000000000000..7beae147be11
--- /dev/null
+++ b/drivers/staging/irda/drivers/nsc-ircc.c
@@ -0,0 +1,2410 @@
+/*********************************************************************
+ *
+ * Filename: nsc-ircc.c
+ * Version: 1.0
+ * Description: Driver for the NSC PC'108 and PC'338 IrDA chipsets
+ * Status: Stable.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sat Nov 7 21:43:15 1998
+ * Modified at: Wed Mar 1 11:29:34 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
+ * Copyright (c) 1998 Lichen Wang, <lwang@actisys.com>
+ * Copyright (c) 1998 Actisys Corp., www.actisys.com
+ * Copyright (c) 2000-2004 Jean Tourrilhes <jt@hpl.hp.com>
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ * Notice that all functions that need to access the chip in _any_
+ * way must save the BSR register on entry and restore it on exit.
+ * It is _very_ important to follow this policy!
+ *
+ * __u8 bank;
+ *
+ * bank = inb(iobase+BSR);
+ *
+ * do_your_stuff_here();
+ *
+ * outb(bank, iobase+BSR);
+ *
+ * If you find bugs in this file, it's very likely that the same bug
+ * will also be in w83977af_ir.c since the implementations are quite
+ * similar.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/gfp.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/rtnetlink.h>
+#include <linux/dma-mapping.h>
+#include <linux/pnp.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#include <net/irda/wrapper.h>
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include "nsc-ircc.h"
+
+#define CHIP_IO_EXTENT 8
+#define BROKEN_DONGLE_ID
+
+static char *driver_name = "nsc-ircc";
+
+/* Power Management */
+#define NSC_IRCC_DRIVER_NAME "nsc-ircc"
+static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state);
+static int nsc_ircc_resume(struct platform_device *dev);
+
+static struct platform_driver nsc_ircc_driver = {
+ .suspend = nsc_ircc_suspend,
+ .resume = nsc_ircc_resume,
+ .driver = {
+ .name = NSC_IRCC_DRIVER_NAME,
+ },
+};
+
+/* Module parameters */
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+static int dongle_id;
+
+/* Use BIOS settings by default, but user may supply module parameters */
+static unsigned int io[] = { ~0, ~0, ~0, ~0, ~0 };
+static unsigned int irq[] = { 0, 0, 0, 0, 0 };
+static unsigned int dma[] = { 0, 0, 0, 0, 0 };
+
+static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info);
+static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info);
+static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info);
+static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info);
+static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info);
+static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info);
+#ifdef CONFIG_PNP
+static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id);
+#endif
+
+/* These are the known NSC chips */
+static nsc_chip_t chips[] = {
+/* Name, {cfg registers}, chip id index reg, chip id expected value, revision mask */
+ { "PC87108", { 0x150, 0x398, 0xea }, 0x05, 0x10, 0xf0,
+ nsc_ircc_probe_108, nsc_ircc_init_108 },
+ { "PC87338", { 0x398, 0x15c, 0x2e }, 0x08, 0xb0, 0xf8,
+ nsc_ircc_probe_338, nsc_ircc_init_338 },
+ /* Contributed by Steffen Pingel - IBM X40 */
+ { "PC8738x", { 0x164e, 0x4e, 0x2e }, 0x20, 0xf4, 0xff,
+ nsc_ircc_probe_39x, nsc_ircc_init_39x },
+ /* Contributed by Jan Frey - IBM A30/A31 */
+ { "PC8739x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xea, 0xff,
+ nsc_ircc_probe_39x, nsc_ircc_init_39x },
+ /* IBM ThinkPads using PC8738x (T60/X60/Z60) */
+ { "IBM-PC8738x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf4, 0xff,
+ nsc_ircc_probe_39x, nsc_ircc_init_39x },
+ /* IBM ThinkPads using PC8394T (T43/R52/?) */
+ { "IBM-PC8394T", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf9, 0xff,
+ nsc_ircc_probe_39x, nsc_ircc_init_39x },
+ { NULL }
+};
+
+static struct nsc_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL, NULL };
+
+static char *dongle_types[] = {
+ "Differential serial interface",
+ "Differential serial interface",
+ "Reserved",
+ "Reserved",
+ "Sharp RY5HD01",
+ "Reserved",
+ "Single-ended serial interface",
+ "Consumer-IR only",
+ "HP HSDL-2300, HP HSDL-3600/HSDL-3610",
+ "IBM31T1100 or Temic TFDS6000/TFDS6500",
+ "Reserved",
+ "Reserved",
+ "HP HSDL-1100/HSDL-2100",
+ "HP HSDL-1100/HSDL-2100",
+ "Supports SIR Mode only",
+ "No dongle connected",
+};
+
+/* PNP probing */
+static chipio_t pnp_info;
+static const struct pnp_device_id nsc_ircc_pnp_table[] = {
+ { .id = "NSC6001", .driver_data = 0 },
+ { .id = "HWPC224", .driver_data = 0 },
+ { .id = "IBM0071", .driver_data = NSC_FORCE_DONGLE_TYPE9 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pnp, nsc_ircc_pnp_table);
+
+static struct pnp_driver nsc_ircc_pnp_driver = {
+#ifdef CONFIG_PNP
+ .name = "nsc-ircc",
+ .id_table = nsc_ircc_pnp_table,
+ .probe = nsc_ircc_pnp_probe,
+#endif
+};
+
+/* Some prototypes */
+static int nsc_ircc_open(chipio_t *info);
+static int nsc_ircc_close(struct nsc_ircc_cb *self);
+static int nsc_ircc_setup(chipio_t *info);
+static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self);
+static int nsc_ircc_dma_receive(struct nsc_ircc_cb *self);
+static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase);
+static netdev_tx_t nsc_ircc_hard_xmit_sir(struct sk_buff *skb,
+ struct net_device *dev);
+static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
+ struct net_device *dev);
+static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
+static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase);
+static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 baud);
+static int nsc_ircc_is_receiving(struct nsc_ircc_cb *self);
+static int nsc_ircc_read_dongle_id (int iobase);
+static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id);
+
+static int nsc_ircc_net_open(struct net_device *dev);
+static int nsc_ircc_net_close(struct net_device *dev);
+static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+
+/* Globals */
+static int pnp_registered;
+static int pnp_succeeded;
+
+/*
+ * Function nsc_ircc_init ()
+ *
+ * Initialize chip. Just try to find out how many chips we are dealing with
+ * and where they are
+ */
+static int __init nsc_ircc_init(void)
+{
+ chipio_t info;
+ nsc_chip_t *chip;
+ int ret;
+ int cfg_base;
+ int cfg, id;
+ int reg;
+ int i = 0;
+
+ ret = platform_driver_register(&nsc_ircc_driver);
+ if (ret) {
+ net_err_ratelimited("%s, Can't register driver!\n",
+ driver_name);
+ return ret;
+ }
+
+	/* Register with PnP subsystem to detect disabled ports */
+ ret = pnp_register_driver(&nsc_ircc_pnp_driver);
+
+ if (!ret)
+ pnp_registered = 1;
+
+ ret = -ENODEV;
+
+ /* Probe for all the NSC chipsets we know about */
+ for (chip = chips; chip->name ; chip++) {
+ pr_debug("%s(), Probing for %s ...\n", __func__,
+ chip->name);
+
+ /* Try all config registers for this chip */
+ for (cfg = 0; cfg < ARRAY_SIZE(chip->cfg); cfg++) {
+ cfg_base = chip->cfg[cfg];
+ if (!cfg_base)
+ continue;
+
+ /* Read index register */
+ reg = inb(cfg_base);
+ if (reg == 0xff) {
+ pr_debug("%s() no chip at 0x%03x\n",
+ __func__, cfg_base);
+ continue;
+ }
+
+ /* Read chip identification register */
+ outb(chip->cid_index, cfg_base);
+ id = inb(cfg_base+1);
+ if ((id & chip->cid_mask) == chip->cid_value) {
+ pr_debug("%s() Found %s chip, revision=%d\n",
+ __func__, chip->name,
+ id & ~chip->cid_mask);
+
+ /*
+ * If we found a correct PnP setting,
+ * we first try it.
+ */
+ if (pnp_succeeded) {
+ memset(&info, 0, sizeof(chipio_t));
+ info.cfg_base = cfg_base;
+ info.fir_base = pnp_info.fir_base;
+ info.dma = pnp_info.dma;
+ info.irq = pnp_info.irq;
+
+ if (info.fir_base < 0x2000) {
+ net_info_ratelimited("%s, chip->init\n",
+ driver_name);
+ chip->init(chip, &info);
+ } else
+ chip->probe(chip, &info);
+
+ if (nsc_ircc_open(&info) >= 0)
+ ret = 0;
+ }
+
+ /*
+ * Opening based on PnP values failed.
+ * Let's fallback to user values, or probe
+ * the chip.
+ */
+ if (ret) {
+ pr_debug("%s, PnP init failed\n",
+ driver_name);
+ memset(&info, 0, sizeof(chipio_t));
+ info.cfg_base = cfg_base;
+ info.fir_base = io[i];
+ info.dma = dma[i];
+ info.irq = irq[i];
+
+ /*
+ * If the user supplies the base address, then
+ * we init the chip, if not we probe the values
+ * set by the BIOS
+ */
+ if (io[i] < 0x2000) {
+ chip->init(chip, &info);
+ } else
+ chip->probe(chip, &info);
+
+ if (nsc_ircc_open(&info) >= 0)
+ ret = 0;
+ }
+ i++;
+ } else {
+ pr_debug("%s(), Wrong chip id=0x%02x\n",
+ __func__, id);
+ }
+ }
+ }
+
+ if (ret) {
+ platform_driver_unregister(&nsc_ircc_driver);
+ pnp_unregister_driver(&nsc_ircc_pnp_driver);
+ pnp_registered = 0;
+ }
+
+ return ret;
+}
+
+/*
+ * Function nsc_ircc_cleanup ()
+ *
+ * Close all configured chips
+ *
+ */
+static void __exit nsc_ircc_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
+ if (dev_self[i])
+ nsc_ircc_close(dev_self[i]);
+ }
+
+ platform_driver_unregister(&nsc_ircc_driver);
+
+ if (pnp_registered)
+ pnp_unregister_driver(&nsc_ircc_pnp_driver);
+
+ pnp_registered = 0;
+}
+
+static const struct net_device_ops nsc_ircc_sir_ops = {
+ .ndo_open = nsc_ircc_net_open,
+ .ndo_stop = nsc_ircc_net_close,
+ .ndo_start_xmit = nsc_ircc_hard_xmit_sir,
+ .ndo_do_ioctl = nsc_ircc_net_ioctl,
+};
+
+static const struct net_device_ops nsc_ircc_fir_ops = {
+ .ndo_open = nsc_ircc_net_open,
+ .ndo_stop = nsc_ircc_net_close,
+ .ndo_start_xmit = nsc_ircc_hard_xmit_fir,
+ .ndo_do_ioctl = nsc_ircc_net_ioctl,
+};
+
+/*
+ * Function nsc_ircc_open (iobase, irq)
+ *
+ * Open driver instance
+ *
+ */
+static int __init nsc_ircc_open(chipio_t *info)
+{
+ struct net_device *dev;
+ struct nsc_ircc_cb *self;
+ void *ret;
+ int err, chip_index;
+
+ for (chip_index = 0; chip_index < ARRAY_SIZE(dev_self); chip_index++) {
+ if (!dev_self[chip_index])
+ break;
+ }
+
+ if (chip_index == ARRAY_SIZE(dev_self)) {
+ net_err_ratelimited("%s(), maximum number of supported chips reached!\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ net_info_ratelimited("%s, Found chip at base=0x%03x\n",
+ driver_name, info->cfg_base);
+
+ if ((nsc_ircc_setup(info)) == -1)
+ return -1;
+
+ net_info_ratelimited("%s, driver loaded (Dag Brattli)\n", driver_name);
+
+ dev = alloc_irdadev(sizeof(struct nsc_ircc_cb));
+ if (dev == NULL) {
+ net_err_ratelimited("%s(), can't allocate memory for control block!\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ self = netdev_priv(dev);
+ self->netdev = dev;
+ spin_lock_init(&self->lock);
+
+ /* Need to store self somewhere */
+ dev_self[chip_index] = self;
+ self->index = chip_index;
+
+ /* Initialize IO */
+ self->io.cfg_base = info->cfg_base;
+ self->io.fir_base = info->fir_base;
+ self->io.irq = info->irq;
+ self->io.fir_ext = CHIP_IO_EXTENT;
+ self->io.dma = info->dma;
+ self->io.fifo_size = 32;
+
+ /* Reserve the ioports that we need */
+ ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name);
+ if (!ret) {
+ net_warn_ratelimited("%s(), can't get iobase of 0x%03x\n",
+ __func__, self->io.fir_base);
+ err = -ENODEV;
+ goto out1;
+ }
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+	/* The only value we must override is the baudrate */
+ self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
+ IR_115200|IR_576000|IR_1152000 |(IR_4000000 << 8);
+
+ self->qos.min_turn_time.bits = qos_mtt_bits;
+ irda_qos_bits_to_value(&self->qos);
+
+ /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
+ self->rx_buff.truesize = 14384;
+ self->tx_buff.truesize = 14384;
+
+ /* Allocate memory if needed */
+ self->rx_buff.head =
+ dma_zalloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
+ if (self->rx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto out2;
+
+ }
+
+ self->tx_buff.head =
+ dma_zalloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
+ if (self->tx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto out3;
+ }
+
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->tx_buff.data = self->tx_buff.head;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Reset Tx queue info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+
+ /* Override the network functions we need to use */
+ dev->netdev_ops = &nsc_ircc_sir_ops;
+
+ err = register_netdev(dev);
+ if (err) {
+ net_err_ratelimited("%s(), register_netdev() failed!\n",
+ __func__);
+ goto out4;
+ }
+ net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
+
+ /* Check if user has supplied a valid dongle id or not */
+ if ((dongle_id <= 0) ||
+ (dongle_id >= ARRAY_SIZE(dongle_types))) {
+ dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base);
+
+ net_info_ratelimited("%s, Found dongle: %s\n",
+ driver_name, dongle_types[dongle_id]);
+ } else {
+ net_info_ratelimited("%s, Using dongle: %s\n",
+ driver_name, dongle_types[dongle_id]);
+ }
+
+ self->io.dongle_id = dongle_id;
+ nsc_ircc_init_dongle_interface(self->io.fir_base, dongle_id);
+
+ self->pldev = platform_device_register_simple(NSC_IRCC_DRIVER_NAME,
+ self->index, NULL, 0);
+ if (IS_ERR(self->pldev)) {
+ err = PTR_ERR(self->pldev);
+ goto out5;
+ }
+ platform_set_drvdata(self->pldev, self);
+
+ return chip_index;
+
+ out5:
+ unregister_netdev(dev);
+ out4:
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+ out3:
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+ out2:
+ release_region(self->io.fir_base, self->io.fir_ext);
+ out1:
+ free_netdev(dev);
+ dev_self[chip_index] = NULL;
+ return err;
+}
+
+/*
+ * Function nsc_ircc_close (self)
+ *
+ * Close driver instance
+ *
+ */
+static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
+{
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ iobase = self->io.fir_base;
+
+ platform_device_unregister(self->pldev);
+
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ /* Release the PORT that this driver is using */
+ pr_debug("%s(), Releasing Region %03x\n",
+ __func__, self->io.fir_base);
+ release_region(self->io.fir_base, self->io.fir_ext);
+
+ if (self->tx_buff.head)
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+
+ if (self->rx_buff.head)
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+
+ dev_self[self->index] = NULL;
+ free_netdev(self->netdev);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_init_108 (iobase, cfg_base, irq, dma)
+ *
+ * Initialize the NSC '108 chip
+ *
+ */
+static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ __u8 temp=0;
+
+ outb(2, cfg_base); /* Mode Control Register (MCTL) */
+ outb(0x00, cfg_base+1); /* Disable device */
+
+ /* Base Address and Interrupt Control Register (BAIC) */
+ outb(CFG_108_BAIC, cfg_base);
+ switch (info->fir_base) {
+ case 0x3e8: outb(0x14, cfg_base+1); break;
+ case 0x2e8: outb(0x15, cfg_base+1); break;
+ case 0x3f8: outb(0x16, cfg_base+1); break;
+ case 0x2f8: outb(0x17, cfg_base+1); break;
+ default: net_err_ratelimited("%s(), invalid base_address\n", __func__);
+ }
+
+ /* Control Signal Routing Register (CSRT) */
+ switch (info->irq) {
+ case 3: temp = 0x01; break;
+ case 4: temp = 0x02; break;
+ case 5: temp = 0x03; break;
+ case 7: temp = 0x04; break;
+ case 9: temp = 0x05; break;
+ case 11: temp = 0x06; break;
+ case 15: temp = 0x07; break;
+ default: net_err_ratelimited("%s(), invalid irq\n", __func__);
+ }
+ outb(CFG_108_CSRT, cfg_base);
+
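+	/* DMA channel select goes in bits 3-4, combined with the IRQ code in temp */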
+ switch (info->dma) {
+ case 0: outb(0x08+temp, cfg_base+1); break;
+ case 1: outb(0x10+temp, cfg_base+1); break;
+ case 3: outb(0x18+temp, cfg_base+1); break;
+ default: net_err_ratelimited("%s(), invalid dma\n", __func__);
+ }
+
+ outb(CFG_108_MCTL, cfg_base); /* Mode Control Register (MCTL) */
+ outb(0x03, cfg_base+1); /* Enable device */
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_probe_108 (chip, info)
+ *
+ *
+ *
+ */
+static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ int reg;
+
+ /* Read address and interrupt control register (BAIC) */
+ outb(CFG_108_BAIC, cfg_base);
+ reg = inb(cfg_base+1);
+
+ switch (reg & 0x03) {
+ case 0:
+ info->fir_base = 0x3e8;
+ break;
+ case 1:
+ info->fir_base = 0x2e8;
+ break;
+ case 2:
+ info->fir_base = 0x3f8;
+ break;
+ case 3:
+ info->fir_base = 0x2f8;
+ break;
+ }
+ info->sir_base = info->fir_base;
+ pr_debug("%s(), probing fir_base=0x%03x\n", __func__,
+ info->fir_base);
+
+ /* Read control signals routing register (CSRT) */
+ outb(CFG_108_CSRT, cfg_base);
+ reg = inb(cfg_base+1);
+
+ switch (reg & 0x07) {
+ case 0:
+ info->irq = -1;
+ break;
+ case 1:
+ info->irq = 3;
+ break;
+ case 2:
+ info->irq = 4;
+ break;
+ case 3:
+ info->irq = 5;
+ break;
+ case 4:
+ info->irq = 7;
+ break;
+ case 5:
+ info->irq = 9;
+ break;
+ case 6:
+ info->irq = 11;
+ break;
+ case 7:
+ info->irq = 15;
+ break;
+ }
+ pr_debug("%s(), probing irq=%d\n", __func__, info->irq);
+
+ /* Currently we only read Rx DMA but it will also be used for Tx */
+ switch ((reg >> 3) & 0x03) {
+ case 0:
+ info->dma = -1;
+ break;
+ case 1:
+ info->dma = 0;
+ break;
+ case 2:
+ info->dma = 1;
+ break;
+ case 3:
+ info->dma = 3;
+ break;
+ }
+ pr_debug("%s(), probing dma=%d\n", __func__, info->dma);
+
+ /* Read mode control register (MCTL) */
+ outb(CFG_108_MCTL, cfg_base);
+ reg = inb(cfg_base+1);
+
+ info->enabled = reg & 0x01;
+ info->suspended = !((reg >> 1) & 0x01);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_init_338 (chip, info)
+ *
+ * Initialize the NSC '338 chip. Remember that the 87338 needs two
+ * consecutive writes to the data registers while CPU interrupts are
+ * disabled. The 97338 does not require this, but it shouldn't do any
+ * harm if we do it anyway.
+ */
+static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info)
+{
+ /* No init yet */
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_probe_338 (chip, info)
+ *
+ *
+ *
+ */
+static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ int reg, com = 0;
+ int pnp;
+
+ /* Read function enable register (FER) */
+ outb(CFG_338_FER, cfg_base);
+ reg = inb(cfg_base+1);
+
+ info->enabled = (reg >> 2) & 0x01;
+
+ /* Check if we are in Legacy or PnP mode */
+ outb(CFG_338_PNP0, cfg_base);
+ reg = inb(cfg_base+1);
+
+ pnp = (reg >> 3) & 0x01;
+ if (pnp) {
+ pr_debug("(), Chip is in PnP mode\n");
+ outb(0x46, cfg_base);
+ reg = (inb(cfg_base+1) & 0xfe) << 2;
+
+ outb(0x47, cfg_base);
+ reg |= ((inb(cfg_base+1) & 0xfc) << 8);
+
+ info->fir_base = reg;
+ } else {
+ /* Read function address register (FAR) */
+ outb(CFG_338_FAR, cfg_base);
+ reg = inb(cfg_base+1);
+
+ switch ((reg >> 4) & 0x03) {
+ case 0:
+ info->fir_base = 0x3f8;
+ break;
+ case 1:
+ info->fir_base = 0x2f8;
+ break;
+ case 2:
+ com = 3;
+ break;
+ case 3:
+ com = 4;
+ break;
+ }
+
+ if (com) {
+ switch ((reg >> 6) & 0x03) {
+ case 0:
+ if (com == 3)
+ info->fir_base = 0x3e8;
+ else
+ info->fir_base = 0x2e8;
+ break;
+ case 1:
+ if (com == 3)
+ info->fir_base = 0x338;
+ else
+ info->fir_base = 0x238;
+ break;
+ case 2:
+ if (com == 3)
+ info->fir_base = 0x2e8;
+ else
+ info->fir_base = 0x2e0;
+ break;
+ case 3:
+ if (com == 3)
+ info->fir_base = 0x220;
+ else
+ info->fir_base = 0x228;
+ break;
+ }
+ }
+ }
+ info->sir_base = info->fir_base;
+
+ /* Read PnP register 1 (PNP1) */
+ outb(CFG_338_PNP1, cfg_base);
+ reg = inb(cfg_base+1);
+
+ info->irq = reg >> 4;
+
+ /* Read PnP register 3 (PNP3) */
+ outb(CFG_338_PNP3, cfg_base);
+ reg = inb(cfg_base+1);
+
+ info->dma = (reg & 0x07) - 1;
+
+ /* Read power and test register (PTR) */
+ outb(CFG_338_PTR, cfg_base);
+ reg = inb(cfg_base+1);
+
+ info->suspended = reg & 0x01;
+
+ return 0;
+}
+
+
+/*
+ * Function nsc_ircc_init_39x (chip, info)
+ *
+ * Now that we know it's a '39x (see probe below), we need to
+ * configure it so we can use it.
+ *
+ * The NSC '39x chip is a Super I/O chip with a "bank" architecture,
+ * the configuration of the different functionality (serial, parallel,
+ * floppy...) are each in a different bank (Logical Device Number).
+ * The base address, irq and dma configuration registers are common
+ * to all functionalities (index 0x30 to 0x7F).
+ * There is only one configuration register specific to the
+ * serial port, CFG_39X_SPC.
+ * JeanII
+ *
+ * Note : this code was written by Jan Frey <janfrey@web.de>
+ */
+static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ int enabled;
+
+ /* User is sure about his config... accept it. */
+ pr_debug("%s(): nsc_ircc_init_39x (user settings): io=0x%04x, irq=%d, dma=%d\n",
+ __func__, info->fir_base, info->irq, info->dma);
+
+ /* Access bank for SP2 */
+ outb(CFG_39X_LDN, cfg_base);
+ outb(0x02, cfg_base+1);
+
+ /* Configure SP2 */
+
+ /* We want to enable the device if not enabled */
+ outb(CFG_39X_ACT, cfg_base);
+ enabled = inb(cfg_base+1) & 0x01;
+
+ if (!enabled) {
+ /* Enable the device */
+ outb(CFG_39X_SIOCF1, cfg_base);
+ outb(0x01, cfg_base+1);
+ /* May want to update info->enabled. Jean II */
+ }
+
+ /* Enable UART bank switching (bit 7) ; Sets the chip to normal
+ * power mode (wake up from sleep mode) (bit 1) */
+ outb(CFG_39X_SPC, cfg_base);
+ outb(0x82, cfg_base+1);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_probe_39x (chip, info)
+ *
+ * Test if we really have a '39x chip at the given address
+ *
+ * Note : this code was written by Jan Frey <janfrey@web.de>
+ */
+static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
+{
+ int cfg_base = info->cfg_base;
+ int reg1, reg2, irq, irqt, dma1, dma2;
+ int enabled, susp;
+
+ pr_debug("%s(), nsc_ircc_probe_39x, base=%d\n",
+ __func__, cfg_base);
+
+ /* This function should be executed with irq off to avoid
+ * another driver messing with the Super I/O bank - Jean II */
+
+ /* Access bank for SP2 */
+ outb(CFG_39X_LDN, cfg_base);
+ outb(0x02, cfg_base+1);
+
+	/* Read info about SP2; store in info struct */
+ outb(CFG_39X_BASEH, cfg_base);
+ reg1 = inb(cfg_base+1);
+ outb(CFG_39X_BASEL, cfg_base);
+ reg2 = inb(cfg_base+1);
+ info->fir_base = (reg1 << 8) | reg2;
+
+ outb(CFG_39X_IRQNUM, cfg_base);
+ irq = inb(cfg_base+1);
+ outb(CFG_39X_IRQSEL, cfg_base);
+ irqt = inb(cfg_base+1);
+ info->irq = irq;
+
+ outb(CFG_39X_DMA0, cfg_base);
+ dma1 = inb(cfg_base+1);
+ outb(CFG_39X_DMA1, cfg_base);
+ dma2 = inb(cfg_base+1);
+	info->dma = dma1 - 1;
+
+ outb(CFG_39X_ACT, cfg_base);
+ info->enabled = enabled = inb(cfg_base+1) & 0x01;
+
+ outb(CFG_39X_SPC, cfg_base);
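+	/* SPC bit 1 is set in normal power mode, so suspended is its inverse */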
+ susp = 1 - ((inb(cfg_base+1) & 0x02) >> 1);
+
+ pr_debug("%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n",
+ __func__, reg1, reg2, irq, irqt, dma1, dma2, enabled, susp);
+
+ /* Configure SP2 */
+
+ /* We want to enable the device if not enabled */
+ outb(CFG_39X_ACT, cfg_base);
+ enabled = inb(cfg_base+1) & 0x01;
+
+ if (!enabled) {
+ /* Enable the device */
+ outb(CFG_39X_SIOCF1, cfg_base);
+ outb(0x01, cfg_base+1);
+ /* May want to update info->enabled. Jean II */
+ }
+
+ /* Enable UART bank switching (bit 7) ; Sets the chip to normal
+ * power mode (wake up from sleep mode) (bit 1) */
+ outb(CFG_39X_SPC, cfg_base);
+ outb(0x82, cfg_base+1);
+
+ return 0;
+}
+
+#ifdef CONFIG_PNP
+/* PNP probing */
+static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
+{
+ memset(&pnp_info, 0, sizeof(chipio_t));
+ pnp_info.irq = -1;
+ pnp_info.dma = -1;
+ pnp_succeeded = 1;
+
+ if (id->driver_data & NSC_FORCE_DONGLE_TYPE9)
+ dongle_id = 0x9;
+
+ /* There doesn't seem to be any way of getting the cfg_base.
+ * On my box, cfg_base is in the PnP descriptor of the
+ * motherboard. Oh well... Jean II */
+
+ if (pnp_port_valid(dev, 0) &&
+ !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED))
+ pnp_info.fir_base = pnp_port_start(dev, 0);
+
+ if (pnp_irq_valid(dev, 0) &&
+ !(pnp_irq_flags(dev, 0) & IORESOURCE_DISABLED))
+ pnp_info.irq = pnp_irq(dev, 0);
+
+ if (pnp_dma_valid(dev, 0) &&
+ !(pnp_dma_flags(dev, 0) & IORESOURCE_DISABLED))
+ pnp_info.dma = pnp_dma(dev, 0);
+
+ pr_debug("%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n",
+ __func__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma);
+
+	if ((pnp_info.fir_base == 0) ||
+ (pnp_info.irq == -1) || (pnp_info.dma == -1)) {
+ /* Returning an error will disable the device. Yuck ! */
+ //return -EINVAL;
+ pnp_succeeded = 0;
+ }
+
+ return 0;
+}
+#endif
+
+/*
+ * Function nsc_ircc_setup (info)
+ *
+ * Returns non-negative on success.
+ *
+ */
+static int nsc_ircc_setup(chipio_t *info)
+{
+ int version;
+ int iobase = info->fir_base;
+
+ /* Read the Module ID */
+ switch_bank(iobase, BANK3);
+ version = inb(iobase+MID);
+
+ pr_debug("%s() Driver %s Found chip version %02x\n",
+ __func__, driver_name, version);
+
+	/* Chip version should be 0x2x */
+ if (0x20 != (version & 0xf0)) {
+ net_err_ratelimited("%s, Wrong chip version %02x\n",
+ driver_name, version);
+ return -1;
+ }
+
+ /* Switch to advanced mode */
+ switch_bank(iobase, BANK2);
+ outb(ECR1_EXT_SL, iobase+ECR1);
+ switch_bank(iobase, BANK0);
+
+	/* Set FIFO threshold to TX17, RX16, reset and enable FIFOs */
+ switch_bank(iobase, BANK0);
+ outb(FCR_RXTH|FCR_TXTH|FCR_TXSR|FCR_RXSR|FCR_FIFO_EN, iobase+FCR);
+
+ outb(0x03, iobase+LCR); /* 8 bit word length */
+ outb(MCR_SIR, iobase+MCR); /* Start at SIR-mode, also clears LSR*/
+
+ /* Set FIFO size to 32 */
+ switch_bank(iobase, BANK2);
+ outb(EXCR2_RFSIZ|EXCR2_TFSIZ, iobase+EXCR2);
+
+ /* IRCR2: FEND_MD is not set */
+ switch_bank(iobase, BANK5);
+ outb(0x02, iobase+4);
+
+ /* Make sure that some defaults are OK */
+ switch_bank(iobase, BANK6);
+ outb(0x20, iobase+0); /* Set 32 bits FIR CRC */
+ outb(0x0a, iobase+1); /* Set MIR pulse width */
+ outb(0x0d, iobase+2); /* Set SIR pulse width to 1.6us */
+ outb(0x2a, iobase+4); /* Set beginning frag, and preamble length */
+
+ /* Enable receive interrupts */
+ switch_bank(iobase, BANK0);
+ outb(IER_RXHDL_IE, iobase+IER);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_read_dongle_id (void)
+ *
+ * Try to read dongle identification. This procedure needs to be executed
+ * once after power-on/reset. It also needs to be used whenever you suspect
+ * that the user may have plugged/unplugged the IrDA Dongle.
+ */
+static int nsc_ircc_read_dongle_id (int iobase)
+{
+ int dongle_id;
+ __u8 bank;
+
+ bank = inb(iobase+BSR);
+
+ /* Select Bank 7 */
+ switch_bank(iobase, BANK7);
+
+ /* IRCFG4: IRSL0_DS and IRSL21_DS are cleared */
+ outb(0x00, iobase+7);
+
+ /* ID0, 1, and 2 are pulled up/down very slowly */
+ udelay(50);
+
+ /* IRCFG1: read the ID bits */
+ dongle_id = inb(iobase+4) & 0x0f;
+
+#ifdef BROKEN_DONGLE_ID
+ if (dongle_id == 0x0a)
+ dongle_id = 0x09;
+#endif
+ /* Go back to bank 0 before returning */
+ switch_bank(iobase, BANK0);
+
+ outb(bank, iobase+BSR);
+
+ return dongle_id;
+}
+
+/*
+ * Function nsc_ircc_init_dongle_interface (iobase, dongle_id)
+ *
+ * This function initializes the dongle for the transceiver that is
+ * used. This procedure needs to be executed once after
+ * power-on/reset. It also needs to be used whenever you suspect that
+ * the dongle is changed.
+ */
+static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
+{
+ int bank;
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Select Bank 7 */
+ switch_bank(iobase, BANK7);
+
+ /* IRCFG4: set according to dongle_id */
+ switch (dongle_id) {
+ case 0x00: /* same as */
+ case 0x01: /* Differential serial interface */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x02: /* same as */
+ case 0x03: /* Reserved */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x04: /* Sharp RY5HD01 */
+ break;
+ case 0x05: /* Reserved, but this is what the Thinkpad reports */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x06: /* Single-ended serial interface */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x07: /* Consumer-IR only */
+ pr_debug("%s(), %s is not for IrDA mode\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
+ pr_debug("%s(), %s\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
+ outb(0x28, iobase+7); /* Set irsl[0-2] as output */
+ break;
+ case 0x0A: /* same as */
+ case 0x0B: /* Reserved */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x0C: /* same as */
+ case 0x0D: /* HP HSDL-1100/HSDL-2100 */
+ /*
+ * Set irsl0 as input, irsl[1-2] as output, and separate
+ * inputs are used for SIR and MIR/FIR
+ */
+ outb(0x48, iobase+7);
+ break;
+ case 0x0E: /* Supports SIR Mode only */
+ outb(0x28, iobase+7); /* Set irsl[0-2] as output */
+ break;
+ case 0x0F: /* No dongle connected */
+ pr_debug("%s(), %s\n",
+ __func__, dongle_types[dongle_id]);
+
+ switch_bank(iobase, BANK0);
+ outb(0x62, iobase+MCR);
+ break;
+ default:
+ pr_debug("%s(), invalid dongle_id %#x",
+ __func__, dongle_id);
+ }
+
+ /* IRCFG1: IRSL1 and 2 are set to IrDA mode */
+ outb(0x00, iobase+4);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+} /* set_up_dongle_interface */
+
+/*
+ * Function nsc_ircc_change_dongle_speed (iobase, speed, dongle_id)
+ *
+ *    Change speed of the attached dongle
+ *
+ */
+static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
+{
+ __u8 bank;
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Select Bank 7 */
+ switch_bank(iobase, BANK7);
+
+ /* IRCFG1: set according to dongle_id */
+ switch (dongle_id) {
+ case 0x00: /* same as */
+ case 0x01: /* Differential serial interface */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x02: /* same as */
+ case 0x03: /* Reserved */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x04: /* Sharp RY5HD01 */
+ break;
+ case 0x05: /* Reserved */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x06: /* Single-ended serial interface */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x07: /* Consumer-IR only */
+ pr_debug("%s(), %s is not for IrDA mode\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
+ pr_debug("%s(), %s\n",
+ __func__, dongle_types[dongle_id]);
+ outb(0x00, iobase+4);
+ if (speed > 115200)
+ outb(0x01, iobase+4);
+ break;
+ case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
+ outb(0x01, iobase+4);
+
+ if (speed == 4000000) {
+ /* There was a cli() there, but we now are already
+ * under spin_lock_irqsave() - JeanII */
+ outb(0x81, iobase+4);
+ outb(0x80, iobase+4);
+ } else
+ outb(0x00, iobase+4);
+ break;
+ case 0x0A: /* same as */
+ case 0x0B: /* Reserved */
+ pr_debug("%s(), %s not defined by irda yet\n",
+ __func__, dongle_types[dongle_id]);
+ break;
+ case 0x0C: /* same as */
+ case 0x0D: /* HP HSDL-1100/HSDL-2100 */
+ break;
+ case 0x0E: /* Supports SIR Mode only */
+ break;
+ case 0x0F: /* No dongle connected */
+ pr_debug("%s(), %s is not for IrDA mode\n",
+ __func__, dongle_types[dongle_id]);
+
+ switch_bank(iobase, BANK0);
+ outb(0x62, iobase+MCR);
+ break;
+ default:
+ pr_debug("%s(), invalid data_rate\n", __func__);
+ }
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+}
+
+/*
+ * Function nsc_ircc_change_speed (self, baud)
+ *
+ * Change the speed of the device
+ *
+ * This function *must* be called with irqs off and the spin-lock held.
+ */
+static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
+{
+ struct net_device *dev;
+ __u8 mcr = MCR_SIR;
+ int iobase;
+ __u8 bank;
+ __u8 ier; /* Interrupt enable register */
+
+ pr_debug("%s(), speed=%d\n", __func__, speed);
+
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ dev = self->netdev;
+ iobase = self->io.fir_base;
+
+ /* Update accounting for new speed */
+ self->io.speed = speed;
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Disable interrupts */
+ switch_bank(iobase, BANK0);
+ outb(0, iobase+IER);
+
+ /* Select Bank 2 */
+ switch_bank(iobase, BANK2);
+
+ outb(0x00, iobase+BGDH);
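+	/* SIR baud generator divisor (BGDL): divisor 1 corresponds to 115200 baud */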
+ switch (speed) {
+ case 9600: outb(0x0c, iobase+BGDL); break;
+ case 19200: outb(0x06, iobase+BGDL); break;
+ case 38400: outb(0x03, iobase+BGDL); break;
+ case 57600: outb(0x02, iobase+BGDL); break;
+ case 115200: outb(0x01, iobase+BGDL); break;
+ case 576000:
+ switch_bank(iobase, BANK5);
+
+ /* IRCR2: MDRS is set */
+ outb(inb(iobase+4) | 0x04, iobase+4);
+
+ mcr = MCR_MIR;
+ pr_debug("%s(), handling baud of 576000\n", __func__);
+ break;
+ case 1152000:
+ mcr = MCR_MIR;
+ pr_debug("%s(), handling baud of 1152000\n", __func__);
+ break;
+ case 4000000:
+ mcr = MCR_FIR;
+ pr_debug("%s(), handling baud of 4000000\n", __func__);
+ break;
+ default:
+ mcr = MCR_FIR;
+ pr_debug("%s(), unknown baud rate of %d\n",
+ __func__, speed);
+ break;
+ }
+
+ /* Set appropriate speed mode */
+ switch_bank(iobase, BANK0);
+ outb(mcr | MCR_TX_DFR, iobase+MCR);
+
+	/* Give some hints to the transceiver */
+ nsc_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);
+
+ /* Set FIFO threshold to TX17, RX16 */
+ switch_bank(iobase, BANK0);
+ outb(0x00, iobase+FCR);
+ outb(FCR_FIFO_EN, iobase+FCR);
+ outb(FCR_RXTH| /* Set Rx FIFO threshold */
+ FCR_TXTH| /* Set Tx FIFO threshold */
+ FCR_TXSR| /* Reset Tx FIFO */
+ FCR_RXSR| /* Reset Rx FIFO */
+ FCR_FIFO_EN, /* Enable FIFOs */
+ iobase+FCR);
+
+ /* Set FIFO size to 32 */
+ switch_bank(iobase, BANK2);
+ outb(EXCR2_RFSIZ|EXCR2_TFSIZ, iobase+EXCR2);
+
+ /* Enable some interrupts so we can receive frames */
+ switch_bank(iobase, BANK0);
+ if (speed > 115200) {
+ /* Install FIR xmit handler */
+ dev->netdev_ops = &nsc_ircc_fir_ops;
+ ier = IER_SFIF_IE;
+ nsc_ircc_dma_receive(self);
+ } else {
+ /* Install SIR xmit handler */
+ dev->netdev_ops = &nsc_ircc_sir_ops;
+ ier = IER_RXHDL_IE;
+ }
+ /* Set our current interrupt mask */
+ outb(ier, iobase+IER);
+
+ /* Restore BSR */
+ outb(bank, iobase+BSR);
+
+ /* Make sure interrupt handlers keep the proper interrupt mask */
+ return ier;
+}
+
+/*
+ * Function nsc_ircc_hard_xmit (skb, dev)
+ *
+ * Transmit the frame!
+ *
+ */
+static netdev_tx_t nsc_ircc_hard_xmit_sir(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct nsc_ircc_cb *self;
+ unsigned long flags;
+ int iobase;
+ __s32 speed;
+ __u8 bank;
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
+
+ iobase = self->io.fir_base;
+
+ netif_stop_queue(dev);
+
+	/* Make sure tests & speed change are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame. */
+ if (!skb->len) {
+ /* If we just sent a frame, we get called before
+ * the last bytes get out (because of the SIR FIFO).
+ * If this is the case, let interrupt handler change
+ * the speed itself... Jean II */
+ if (self->io.direction == IO_RECV) {
+ nsc_ircc_change_speed(self, speed);
+ /* TODO : For SIR->SIR, the next packet
+ * may get corrupted - Jean II */
+ netif_wake_queue(dev);
+ } else {
+ self->new_speed = speed;
+ /* Queue will be restarted after speed change
+				 * to make sure packets get through the
+ * proper xmit handler - Jean II */
+ }
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ } else
+ self->new_speed = speed;
+ }
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ self->tx_buff.data = self->tx_buff.head;
+
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ dev->stats.tx_bytes += self->tx_buff.len;
+
+ /* Add interrupt on tx low level (will fire immediately) */
+ switch_bank(iobase, BANK0);
+ outb(IER_TXLDL_IE, iobase+IER);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct nsc_ircc_cb *self;
+ unsigned long flags;
+ int iobase;
+ __s32 speed;
+ __u8 bank;
+ int mtt, diff;
+
+ self = netdev_priv(dev);
+ iobase = self->io.fir_base;
+
+ netif_stop_queue(dev);
+
+	/* Make sure tests & speed change are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame. */
+ if (!skb->len) {
+ /* If we are currently transmitting, defer to
+ * interrupt handler. - Jean II */
+			if (self->tx_fifo.len == 0) {
+ nsc_ircc_change_speed(self, speed);
+ netif_wake_queue(dev);
+ } else {
+ self->new_speed = speed;
+ /* Keep queue stopped :
+ * the speed change operation may change the
+ * xmit handler, and we want to make sure
+				 * the next packet gets through the proper
+ * Tx path, so block the Tx queue until
+ * the speed change has been done.
+ * Jean II */
+ }
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ } else {
+ /* Change speed after current frame */
+ self->new_speed = speed;
+ }
+ }
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Register and copy this frame to DMA memory */
+ self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
+ self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
+ self->tx_fifo.tail += skb->len;
+
+ dev->stats.tx_bytes += skb->len;
+
+ skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
+ skb->len);
+ self->tx_fifo.len++;
+ self->tx_fifo.free++;
+
+ /* Start transmit only if there is currently no transmit going on */
+ if (self->tx_fifo.len == 1) {
+ /* Check if we must wait the min turn time or not */
+ mtt = irda_get_mtt(skb);
+ if (mtt) {
+ /* Check how much time we have used already */
+ diff = ktime_us_delta(ktime_get(), self->stamp);
+
+ /* Check if the mtt is larger than the time we have
+ * already used by all the protocol processing
+ */
+ if (mtt > diff) {
+ mtt -= diff;
+
+ /*
+ * Use timer if delay larger than 125 us, and
+ * use udelay for smaller values which should
+ * be acceptable
+ */
+ if (mtt > 125) {
+ /* Adjust for timer resolution */
+ mtt = mtt / 125;
+
+ /* Setup timer */
+ switch_bank(iobase, BANK4);
+ outb(mtt & 0xff, iobase+TMRL);
+ outb((mtt >> 8) & 0x0f, iobase+TMRH);
+
+ /* Start timer */
+ outb(IRCR1_TMR_EN, iobase+IRCR1);
+ self->io.direction = IO_XMIT;
+
+ /* Enable timer interrupt */
+ switch_bank(iobase, BANK0);
+ outb(IER_TMR_IE, iobase+IER);
+
+ /* Timer will take care of the rest */
+ goto out;
+ } else
+ udelay(mtt);
+ }
+ }
+ /* Enable DMA interrupt */
+ switch_bank(iobase, BANK0);
+ outb(IER_DMA_IE, iobase+IER);
+
+ /* Transmit frame */
+ nsc_ircc_dma_xmit(self, iobase);
+ }
+ out:
+ /* Not busy transmitting anymore if window is not full,
+ * and if we don't need to change speed */
+ if ((self->tx_fifo.free < MAX_TX_WINDOW) && (self->new_speed == 0))
+ netif_wake_queue(self->netdev);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Function nsc_ircc_dma_xmit (self, iobase)
+ *
+ * Transmit data using DMA
+ *
+ */
+static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase)
+{
+ int bsr;
+
+ /* Save current bank */
+ bsr = inb(iobase+BSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK0);
+ outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);
+
+ self->io.direction = IO_XMIT;
+
+ /* Choose transmit DMA channel */
+ switch_bank(iobase, BANK2);
+ outb(ECR1_DMASWP|ECR1_DMANF|ECR1_EXT_SL, iobase+ECR1);
+
+ irda_setup_dma(self->io.dma,
+ ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
+ self->tx_buff.head) + self->tx_buff_dma,
+ self->tx_fifo.queue[self->tx_fifo.ptr].len,
+ DMA_TX_MODE);
+
+ /* Enable DMA and SIR interaction pulse */
+ switch_bank(iobase, BANK0);
+ outb(inb(iobase+MCR)|MCR_TX_DFR|MCR_DMA_EN|MCR_IR_PLS, iobase+MCR);
+
+ /* Restore bank register */
+ outb(bsr, iobase+BSR);
+}
+
+/*
+ * Function nsc_ircc_pio_xmit (self, iobase)
+ *
+ * Transmit data using PIO. Returns the number of bytes that actually
+ * got transferred
+ *
+ */
+static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
+{
+ int actual = 0;
+ __u8 bank;
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ switch_bank(iobase, BANK0);
+ if (!(inb_p(iobase+LSR) & LSR_TXEMP)) {
+ pr_debug("%s(), warning, FIFO not empty yet!\n",
+ __func__);
+
+ /* FIFO may still be filled to the Tx interrupt threshold */
+ fifo_size -= 17;
+ }
+
+ /* Fill FIFO with current frame */
+ while ((fifo_size-- > 0) && (actual < len)) {
+ /* Transmit next byte */
+ outb(buf[actual++], iobase+TXD);
+ }
+
+ pr_debug("%s(), fifo_size %d ; %d sent of %d\n",
+ __func__, fifo_size, actual, len);
+
+ /* Restore bank */
+ outb(bank, iobase+BSR);
+
+ return actual;
+}
+
+/*
+ * Function nsc_ircc_dma_xmit_complete (self)
+ *
+ *    The transfer of a frame is finished. This function will only be called
+ * by the interrupt handler
+ *
+ */
+static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self)
+{
+ int iobase;
+ __u8 bank;
+ int ret = TRUE;
+
+ iobase = self->io.fir_base;
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK0);
+ outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);
+
+ /* Check for underrun! */
+ if (inb(iobase+ASCR) & ASCR_TXUR) {
+ self->netdev->stats.tx_errors++;
+ self->netdev->stats.tx_fifo_errors++;
+
+ /* Clear bit, by writing 1 into it */
+ outb(ASCR_TXUR, iobase+ASCR);
+ } else {
+ self->netdev->stats.tx_packets++;
+ }
+
+ /* Finished with this frame, so prepare for next */
+ self->tx_fifo.ptr++;
+ self->tx_fifo.len--;
+
+ /* Any frames to be sent back-to-back? */
+ if (self->tx_fifo.len) {
+ nsc_ircc_dma_xmit(self, iobase);
+
+ /* Not finished yet! */
+ ret = FALSE;
+ } else {
+ /* Reset Tx FIFO info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+ }
+
+ /* Make sure we have room for more frames and
+ * that we don't need to change speed */
+ if ((self->tx_fifo.free < MAX_TX_WINDOW) && (self->new_speed == 0)) {
+ /* Not busy transmitting anymore */
+ /* Tell the network layer, that we can accept more frames */
+ netif_wake_queue(self->netdev);
+ }
+
+ /* Restore bank */
+ outb(bank, iobase+BSR);
+
+ return ret;
+}
+
+/*
+ * Function nsc_ircc_dma_receive (self)
+ *
+ * Get ready for receiving a frame. The device will initiate a DMA
+ * if it starts to receive a frame.
+ *
+ */
+static int nsc_ircc_dma_receive(struct nsc_ircc_cb *self)
+{
+ int iobase;
+ __u8 bsr;
+
+ iobase = self->io.fir_base;
+
+ /* Reset Tx FIFO info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+
+ /* Save current bank */
+ bsr = inb(iobase+BSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, BANK0);
+ outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);
+
+ /* Choose DMA Rx, DMA Fairness, and Advanced mode */
+ switch_bank(iobase, BANK2);
+ outb(ECR1_DMANF|ECR1_EXT_SL, iobase+ECR1);
+
+ self->io.direction = IO_RECV;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Reset Rx FIFO. This will also flush the ST_FIFO */
+ switch_bank(iobase, BANK0);
+ outb(FCR_RXSR|FCR_FIFO_EN, iobase+FCR);
+
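+	/* Clear the driver's software copy of the status FIFO as well */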
+ self->st_fifo.len = self->st_fifo.pending_bytes = 0;
+ self->st_fifo.tail = self->st_fifo.head = 0;
+
+ irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
+ DMA_RX_MODE);
+
+ /* Enable DMA */
+ switch_bank(iobase, BANK0);
+ outb(inb(iobase+MCR)|MCR_DMA_EN, iobase+MCR);
+
+ /* Restore bank register */
+ outb(bsr, iobase+BSR);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_dma_receive_complete (self)
+ *
+ * Finished with receiving frames
+ *
+ *
+ */
+static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
+{
+ struct st_fifo *st_fifo;
+ struct sk_buff *skb;
+ __u8 status;
+ __u8 bank;
+ int len;
+
+ st_fifo = &self->st_fifo;
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Read all entries in status FIFO */
+ switch_bank(iobase, BANK5);
+ while ((status = inb(iobase+FRM_ST)) & FRM_ST_VLD) {
+ /* We must empty the status FIFO no matter what */
+ len = inb(iobase+RFLFL) | ((inb(iobase+RFLFH) & 0x1f) << 8);
+
+ if (st_fifo->tail >= MAX_RX_WINDOW) {
+ pr_debug("%s(), window is full!\n", __func__);
+ continue;
+ }
+
+ st_fifo->entries[st_fifo->tail].status = status;
+ st_fifo->entries[st_fifo->tail].len = len;
+ st_fifo->pending_bytes += len;
+ st_fifo->tail++;
+ st_fifo->len++;
+ }
+ /* Try to process all entries in status FIFO */
+ while (st_fifo->len > 0) {
+ /* Get first entry */
+ status = st_fifo->entries[st_fifo->head].status;
+ len = st_fifo->entries[st_fifo->head].len;
+ st_fifo->pending_bytes -= len;
+ st_fifo->head++;
+ st_fifo->len--;
+
+ /* Check for errors */
+ if (status & FRM_ST_ERR_MSK) {
+ if (status & FRM_ST_LOST_FR) {
+ /* Add number of lost frames to stats */
+ self->netdev->stats.rx_errors += len;
+ } else {
+ /* Skip frame */
+ self->netdev->stats.rx_errors++;
+
+ self->rx_buff.data += len;
+
+ if (status & FRM_ST_MAX_LEN)
+ self->netdev->stats.rx_length_errors++;
+
+ if (status & FRM_ST_PHY_ERR)
+ self->netdev->stats.rx_frame_errors++;
+
+ if (status & FRM_ST_BAD_CRC)
+ self->netdev->stats.rx_crc_errors++;
+ }
+ /* The errors below can be reported in both cases */
+ if (status & FRM_ST_OVR1)
+ self->netdev->stats.rx_fifo_errors++;
+
+ if (status & FRM_ST_OVR2)
+ self->netdev->stats.rx_fifo_errors++;
+ } else {
+ /*
+ * First we must make sure that the frame we
+ * want to deliver is all in main memory. If we
+ * cannot tell, then we check if the Rx FIFO is
+ * empty. If not then we will have to take a nap
+ * and try again later.
+ */
+ if (st_fifo->pending_bytes < self->io.fifo_size) {
+ switch_bank(iobase, BANK0);
+ if (inb(iobase+LSR) & LSR_RXDA) {
+ /* Put this entry back in fifo */
+ st_fifo->head--;
+ st_fifo->len++;
+ st_fifo->pending_bytes += len;
+ st_fifo->entries[st_fifo->head].status = status;
+ st_fifo->entries[st_fifo->head].len = len;
+ /*
+ * DMA not finished yet, so try again
+ * later, set timer value, resolution
+ * 125 us
+ */
+ switch_bank(iobase, BANK4);
+ outb(0x02, iobase+TMRL); /* x 125 us */
+ outb(0x00, iobase+TMRH);
+
+ /* Start timer */
+ outb(IRCR1_TMR_EN, iobase+IRCR1);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ return FALSE; /* I'll be back! */
+ }
+ }
+
+ /*
+ * Remember the time we received this frame, so we can
+ * reduce the min turn time a bit since we will know
+ * how much time we have used for protocol processing
+ */
+ self->stamp = ktime_get();
+
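+			/* One extra byte so we can reserve it below to align the IP header */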
+ skb = dev_alloc_skb(len+1);
+ if (skb == NULL) {
+ self->netdev->stats.rx_dropped++;
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ return FALSE;
+ }
+
+ /* Make sure IP header gets aligned */
+ skb_reserve(skb, 1);
+
+ /* Copy frame without CRC */
+ if (self->io.speed < 4000000) {
+ skb_put(skb, len-2);
+ skb_copy_to_linear_data(skb,
+ self->rx_buff.data,
+ len - 2);
+ } else {
+ skb_put(skb, len-4);
+ skb_copy_to_linear_data(skb,
+ self->rx_buff.data,
+ len - 4);
+ }
+
+ /* Move to next frame */
+ self->rx_buff.data += len;
+ self->netdev->stats.rx_bytes += len;
+ self->netdev->stats.rx_packets++;
+
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ }
+ }
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ return TRUE;
+}
+
+/*
+ * Function nsc_ircc_pio_receive (self)
+ *
+ * Receive all data in receiver FIFO
+ *
+ */
+static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self)
+{
+ __u8 byte;
+ int iobase;
+
+ iobase = self->io.fir_base;
+
+ /* Receive all characters in Rx FIFO */
+ do {
+ byte = inb(iobase+RXD);
+ async_unwrap_char(self->netdev, &self->netdev->stats,
+ &self->rx_buff, byte);
+ } while (inb(iobase+LSR) & LSR_RXDA); /* Data available */
+}
+
+/*
+ * Function nsc_ircc_sir_interrupt (self, eir)
+ *
+ * Handle SIR interrupt
+ *
+ */
+static void nsc_ircc_sir_interrupt(struct nsc_ircc_cb *self, int eir)
+{
+ int actual;
+
+ /* Check if transmit FIFO is low on data */
+ if (eir & EIR_TXLDL_EV) {
+ /* Write data left in transmit buffer */
+ actual = nsc_ircc_pio_write(self->io.fir_base,
+ self->tx_buff.data,
+ self->tx_buff.len,
+ self->io.fifo_size);
+ self->tx_buff.data += actual;
+ self->tx_buff.len -= actual;
+
+ self->io.direction = IO_XMIT;
+
+ /* Check if finished */
+ if (self->tx_buff.len > 0)
+ self->ier = IER_TXLDL_IE;
+ else {
+
+ self->netdev->stats.tx_packets++;
+ netif_wake_queue(self->netdev);
+ self->ier = IER_TXEMP_IE;
+ }
+
+ }
+ /* Check if transmission has completed */
+ if (eir & EIR_TXEMP_EV) {
+ /* Turn around and get ready to receive some data */
+ self->io.direction = IO_RECV;
+ self->ier = IER_RXHDL_IE;
+ /* Check if we need to change the speed?
+ * Need to be after self->io.direction to avoid race with
+ * nsc_ircc_hard_xmit_sir() - Jean II */
+ if (self->new_speed) {
+ pr_debug("%s(), Changing speed!\n", __func__);
+ self->ier = nsc_ircc_change_speed(self,
+ self->new_speed);
+ self->new_speed = 0;
+ netif_wake_queue(self->netdev);
+
+ /* Check if we are going to FIR */
+ if (self->io.speed > 115200) {
+				/* No need to do any more SIR stuff */
+ return;
+ }
+ }
+ }
+
+ /* Rx FIFO threshold or timeout */
+ if (eir & EIR_RXHDL_EV) {
+ nsc_ircc_pio_receive(self);
+
+ /* Keep receiving */
+ self->ier = IER_RXHDL_IE;
+ }
+}
+
+/*
+ * Function nsc_ircc_fir_interrupt (self, eir)
+ *
+ * Handle MIR/FIR interrupt
+ *
+ */
+static void nsc_ircc_fir_interrupt(struct nsc_ircc_cb *self, int iobase,
+ int eir)
+{
+ __u8 bank;
+
+ bank = inb(iobase+BSR);
+
+ /* Status FIFO event*/
+ if (eir & EIR_SFIF_EV) {
+ /* Check if DMA has finished */
+ if (nsc_ircc_dma_receive_complete(self, iobase)) {
+ /* Wait for next status FIFO interrupt */
+ self->ier = IER_SFIF_IE;
+ } else {
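+			/* Rx not complete yet; the receive path armed the timer, so enable the timer interrupt too */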
+ self->ier = IER_SFIF_IE | IER_TMR_IE;
+ }
+ } else if (eir & EIR_TMR_EV) { /* Timer finished */
+ /* Disable timer */
+ switch_bank(iobase, BANK4);
+ outb(0, iobase+IRCR1);
+
+ /* Clear timer event */
+ switch_bank(iobase, BANK0);
+ outb(ASCR_CTE, iobase+ASCR);
+
+ /* Check if this is a Tx timer interrupt */
+ if (self->io.direction == IO_XMIT) {
+ nsc_ircc_dma_xmit(self, iobase);
+
+ /* Interrupt on DMA */
+ self->ier = IER_DMA_IE;
+ } else {
+ /* Check (again) if DMA has finished */
+ if (nsc_ircc_dma_receive_complete(self, iobase)) {
+ self->ier = IER_SFIF_IE;
+ } else {
+ self->ier = IER_SFIF_IE | IER_TMR_IE;
+ }
+ }
+ } else if (eir & EIR_DMA_EV) {
+ /* Finished with all transmissions? */
+ if (nsc_ircc_dma_xmit_complete(self)) {
+			if (self->new_speed != 0) {
+				/* As we stop the Tx queue, the speed change
+				 * needs to be done when the Tx FIFO is
+				 * empty. Ask for a Tx done interrupt */
+ self->ier = IER_TXEMP_IE;
+ } else {
+ /* Check if there are more frames to be
+ * transmitted */
+ if (irda_device_txqueue_empty(self->netdev)) {
+ /* Prepare for receive */
+ nsc_ircc_dma_receive(self);
+ self->ier = IER_SFIF_IE;
+ } else
+ net_warn_ratelimited("%s(), potential Tx queue lockup !\n",
+ __func__);
+ }
+ } else {
+ /* Not finished yet, so interrupt on DMA again */
+ self->ier = IER_DMA_IE;
+ }
+ } else if (eir & EIR_TXEMP_EV) {
+ /* The Tx FIFO has totally drained out, so now we can change
+ * the speed... - Jean II */
+ self->ier = nsc_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ netif_wake_queue(self->netdev);
+ /* Note : nsc_ircc_change_speed() restarted Rx fifo */
+ }
+
+ outb(bank, iobase+BSR);
+}
+
+/*
+ * Function nsc_ircc_interrupt (irq, dev_id, regs)
+ *
+ * An interrupt from the chip has arrived. Time to do some work
+ *
+ */
+static irqreturn_t nsc_ircc_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct nsc_ircc_cb *self;
+ __u8 bsr, eir;
+ int iobase;
+
+ self = netdev_priv(dev);
+
+ spin_lock(&self->lock);
+
+ iobase = self->io.fir_base;
+
+ bsr = inb(iobase+BSR); /* Save current bank */
+
+ switch_bank(iobase, BANK0);
+ self->ier = inb(iobase+IER);
+	eir = inb(iobase+EIR) & self->ier; /* Keep only the events we are interested in */
+
+ outb(0, iobase+IER); /* Disable interrupts */
+
+ if (eir) {
+ /* Dispatch interrupt handler for the current speed */
+ if (self->io.speed > 115200)
+ nsc_ircc_fir_interrupt(self, iobase, eir);
+ else
+ nsc_ircc_sir_interrupt(self, eir);
+ }
+
+ outb(self->ier, iobase+IER); /* Restore interrupts */
+ outb(bsr, iobase+BSR); /* Restore bank register */
+
+ spin_unlock(&self->lock);
+ return IRQ_RETVAL(eir);
+}
+
+/*
+ * Function nsc_ircc_is_receiving (self)
+ *
+ *    Return TRUE if we are currently receiving a frame
+ *
+ */
+static int nsc_ircc_is_receiving(struct nsc_ircc_cb *self)
+{
+ unsigned long flags;
+ int status = FALSE;
+ int iobase;
+ __u8 bank;
+
+ IRDA_ASSERT(self != NULL, return FALSE;);
+
+ spin_lock_irqsave(&self->lock, flags);
+
+ if (self->io.speed > 115200) {
+ iobase = self->io.fir_base;
+
+ /* Check if rx FIFO is not empty */
+ bank = inb(iobase+BSR);
+ switch_bank(iobase, BANK2);
+ if ((inb(iobase+RXFLV) & 0x3f) != 0) {
+ /* We are receiving something */
+ status = TRUE;
+ }
+ outb(bank, iobase+BSR);
+ } else
+ status = (self->rx_buff.state != OUTSIDE_FRAME);
+
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ return status;
+}
+
+/*
+ * Function nsc_ircc_net_open (dev)
+ *
+ * Start the device
+ *
+ */
+static int nsc_ircc_net_open(struct net_device *dev)
+{
+ struct nsc_ircc_cb *self;
+ int iobase;
+ char hwname[32];
+ __u8 bank;
+
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ iobase = self->io.fir_base;
+
+ if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, dev->name, dev)) {
+ net_warn_ratelimited("%s, unable to allocate irq=%d\n",
+ driver_name, self->io.irq);
+ return -EAGAIN;
+ }
+ /*
+ * Always allocate the DMA channel after the IRQ, and clean up on
+ * failure.
+ */
+ if (request_dma(self->io.dma, dev->name)) {
+ net_warn_ratelimited("%s, unable to allocate dma=%d\n",
+ driver_name, self->io.dma);
+ free_irq(self->io.irq, dev);
+ return -EAGAIN;
+ }
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* turn on interrupts */
+ switch_bank(iobase, BANK0);
+ outb(IER_LS_IE | IER_RXHDL_IE, iobase+IER);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ /* Ready to play! */
+ netif_start_queue(dev);
+
+ /* Give self a hardware name */
+ sprintf(hwname, "NSC-FIR @ 0x%03x", self->io.fir_base);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open(dev, &self->qos, hwname);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_net_close (dev)
+ *
+ * Stop the device
+ *
+ */
+static int nsc_ircc_net_close(struct net_device *dev)
+{
+ struct nsc_ircc_cb *self;
+ int iobase;
+ __u8 bank;
+
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+
+ iobase = self->io.fir_base;
+
+ disable_dma(self->io.dma);
+
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Disable interrupts */
+ switch_bank(iobase, BANK0);
+ outb(0, iobase+IER);
+
+ free_irq(self->io.irq, dev);
+ free_dma(self->io.dma);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ return 0;
+}
+
+/*
+ * Function nsc_ircc_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct nsc_ircc_cb *self;
+ unsigned long flags;
+ int ret = 0;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ spin_lock_irqsave(&self->lock, flags);
+ nsc_ircc_change_speed(self, irq->ifr_baudrate);
+ spin_unlock_irqrestore(&self->lock, flags);
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ /* This is already protected */
+ irq->ifr_receiving = nsc_ircc_is_receiving(self);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct nsc_ircc_cb *self = platform_get_drvdata(dev);
+ int bank;
+ unsigned long flags;
+ int iobase = self->io.fir_base;
+
+ if (self->io.suspended)
+ return 0;
+
+ pr_debug("%s, Suspending\n", driver_name);
+
+ rtnl_lock();
+ if (netif_running(self->netdev)) {
+ netif_device_detach(self->netdev);
+ spin_lock_irqsave(&self->lock, flags);
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Disable interrupts */
+ switch_bank(iobase, BANK0);
+ outb(0, iobase+IER);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ spin_unlock_irqrestore(&self->lock, flags);
+ free_irq(self->io.irq, self->netdev);
+ disable_dma(self->io.dma);
+ }
+ self->io.suspended = 1;
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int nsc_ircc_resume(struct platform_device *dev)
+{
+ struct nsc_ircc_cb *self = platform_get_drvdata(dev);
+ unsigned long flags;
+
+ if (!self->io.suspended)
+ return 0;
+
+ pr_debug("%s, Waking up\n", driver_name);
+
+ rtnl_lock();
+ nsc_ircc_setup(&self->io);
+ nsc_ircc_init_dongle_interface(self->io.fir_base, self->io.dongle_id);
+
+ if (netif_running(self->netdev)) {
+ if (request_irq(self->io.irq, nsc_ircc_interrupt, 0,
+ self->netdev->name, self->netdev)) {
+ net_warn_ratelimited("%s, unable to allocate irq=%d\n",
+ driver_name, self->io.irq);
+
+ /*
+ * Don't fail resume process, just kill this
+ * network interface
+ */
+ unregister_netdevice(self->netdev);
+ } else {
+ spin_lock_irqsave(&self->lock, flags);
+ nsc_ircc_change_speed(self, self->io.speed);
+ spin_unlock_irqrestore(&self->lock, flags);
+ netif_device_attach(self->netdev);
+ }
+
+ } else {
+ spin_lock_irqsave(&self->lock, flags);
+ nsc_ircc_change_speed(self, 9600);
+ spin_unlock_irqrestore(&self->lock, flags);
+ }
+ self->io.suspended = 0;
+ rtnl_unlock();
+
+ return 0;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("NSC IrDA Device Driver");
+MODULE_LICENSE("GPL");
+
+
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
+module_param_hw_array(io, int, ioport, NULL, 0);
+MODULE_PARM_DESC(io, "Base I/O addresses");
+module_param_hw_array(irq, int, irq, NULL, 0);
+MODULE_PARM_DESC(irq, "IRQ lines");
+module_param_hw_array(dma, int, dma, NULL, 0);
+MODULE_PARM_DESC(dma, "DMA channels");
+module_param(dongle_id, int, 0);
+MODULE_PARM_DESC(dongle_id, "Type-id of used dongle");
+
+module_init(nsc_ircc_init);
+module_exit(nsc_ircc_cleanup);
+
diff --git a/drivers/staging/irda/drivers/nsc-ircc.h b/drivers/staging/irda/drivers/nsc-ircc.h
new file mode 100644
index 000000000000..7be5acb56532
--- /dev/null
+++ b/drivers/staging/irda/drivers/nsc-ircc.h
@@ -0,0 +1,281 @@
+/*********************************************************************
+ *
+ * Filename: nsc-ircc.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Fri Nov 13 14:37:40 1998
+ * Modified at: Sun Jan 23 17:47:00 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
+ * Copyright (c) 1998 Lichen Wang, <lwang@actisys.com>
+ * Copyright (c) 1998 Actisys Corp., www.actisys.com
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef NSC_IRCC_H
+#define NSC_IRCC_H
+
+#include <linux/ktime.h>
+
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+/* Features for chips (set in driver_data) */
+#define NSC_FORCE_DONGLE_TYPE9 0x00000001
+
+/* DMA modes needed */
+#define DMA_TX_MODE 0x08 /* Mem to I/O, ++, demand. */
+#define DMA_RX_MODE 0x04 /* I/O to mem, ++, demand. */
+
+/* Config registers for the '108 */
+#define CFG_108_BAIC 0x00
+#define CFG_108_CSRT 0x01
+#define CFG_108_MCTL 0x02
+
+/* Config registers for the '338 */
+#define CFG_338_FER 0x00
+#define CFG_338_FAR 0x01
+#define CFG_338_PTR 0x02
+#define CFG_338_PNP0 0x1b
+#define CFG_338_PNP1 0x1c
+#define CFG_338_PNP3 0x4f
+
+/* Config registers for the '39x (in the logical device bank) */
+#define CFG_39X_LDN 0x07 /* Logical device number (Super I/O bank) */
+#define CFG_39X_SIOCF1 0x21 /* SuperI/O Config */
+#define CFG_39X_ACT 0x30 /* Device activation */
+#define CFG_39X_BASEH 0x60 /* Device base address (high bits) */
+#define CFG_39X_BASEL 0x61 /* Device base address (low bits) */
+#define CFG_39X_IRQNUM 0x70 /* Interrupt number & wake up enable */
+#define CFG_39X_IRQSEL 0x71 /* Interrupt select (edge/level + polarity) */
+#define CFG_39X_DMA0 0x74 /* DMA 0 configuration */
+#define CFG_39X_DMA1 0x75 /* DMA 1 configuration */
+#define CFG_39X_SPC 0xF0 /* Serial port configuration register */
+
+/* Flags for configuration register CRF0 */
+#define APEDCRC 0x02
+#define ENBNKSEL 0x01
+
+/* Set 0 */
+#define TXD 0x00 /* Transmit data port */
+#define RXD 0x00 /* Receive data port */
+
+/* Register 1 */
+#define IER 0x01 /* Interrupt Enable Register*/
+#define IER_RXHDL_IE 0x01 /* Receiver high data level interrupt */
+#define IER_TXLDL_IE    0x02 /* Transmitter low data level interrupt */
+#define IER_LS_IE	0x04 /* Link Status Interrupt */
+#define IER_ETXURI 0x04 /* Tx underrun */
+#define IER_DMA_IE 0x10 /* DMA finished interrupt */
+#define IER_TXEMP_IE    0x20 /* Transmitter FIFO empty interrupt */
+#define IER_SFIF_IE 0x40 /* Frame status FIFO intr */
+#define IER_TMR_IE 0x80 /* Timer event */
+
+#define FCR 0x02 /* (write only) */
+#define FCR_FIFO_EN 0x01 /* Enable FIFO's */
+#define FCR_RXSR 0x02 /* Rx FIFO soft reset */
+#define FCR_TXSR 0x04 /* Tx FIFO soft reset */
+#define FCR_RXTH 0x40 /* Rx FIFO threshold (set to 16) */
+#define FCR_TXTH 0x20 /* Tx FIFO threshold (set to 17) */
+
+#define EIR 0x02 /* (read only) */
+#define EIR_RXHDL_EV 0x01
+#define EIR_TXLDL_EV 0x02
+#define EIR_LS_EV 0x04
+#define EIR_DMA_EV 0x10
+#define EIR_TXEMP_EV 0x20
+#define EIR_SFIF_EV 0x40
+#define EIR_TMR_EV 0x80
+
+#define LCR 0x03 /* Link control register */
+#define LCR_WLS_8 0x03 /* 8 bits */
+
+#define BSR 0x03 /* Bank select register */
+#define BSR_BKSE 0x80
+#define BANK0 LCR_WLS_8 /* Must make sure that we set 8N1 */
+#define BANK1 0x80
+#define BANK2 0xe0
+#define BANK3 0xe4
+#define BANK4 0xe8
+#define BANK5 0xec
+#define BANK6 0xf0
+#define BANK7 0xf4
+
+#define MCR 0x04 /* Mode Control Register */
+#define MCR_MODE_MASK ~(0xd0)
+#define MCR_UART 0x00
+#define MCR_RESERVED 0x20
+#define MCR_SHARP_IR 0x40
+#define MCR_SIR 0x60
+#define MCR_MIR 0x80
+#define MCR_FIR 0xa0
+#define MCR_CEIR 0xb0
+#define MCR_IR_PLS 0x10
+#define MCR_DMA_EN 0x04
+#define MCR_EN_IRQ 0x08
+#define MCR_TX_DFR 0x08
+
+#define LSR 0x05 /* Link status register */
+#define LSR_RXDA 0x01 /* Receiver data available */
+#define LSR_TXRDY 0x20 /* Transmitter ready */
+#define LSR_TXEMP 0x40 /* Transmitter empty */
+
+#define ASCR 0x07 /* Auxiliary Status and Control Register */
+#define ASCR_RXF_TOUT 0x01 /* Rx FIFO timeout */
+#define ASCR_FEND_INF 0x02 /* Frame end bytes in rx FIFO */
+#define ASCR_S_EOT 0x04 /* Set end of transmission */
+#define ASCT_RXBSY 0x20 /* Rx busy */
+#define ASCR_TXUR       0x40 /* Transmitter underrun */
+#define ASCR_CTE 0x80 /* Clear timer event */
+
+/* Bank 2 */
+#define BGDL 0x00 /* Baud Generator Divisor Port (Low Byte) */
+#define BGDH 0x01 /* Baud Generator Divisor Port (High Byte) */
+
+#define ECR1 0x02 /* Extended Control Register 1 */
+#define ECR1_EXT_SL 0x01 /* Extended Mode Select */
+#define ECR1_DMANF 0x02 /* DMA Fairness */
+#define ECR1_DMATH 0x04 /* DMA Threshold */
+#define ECR1_DMASWP 0x08 /* DMA Swap */
+
+#define EXCR2 0x04
+#define EXCR2_TFSIZ     0x01 /* Tx FIFO size = 32 */
+#define EXCR2_RFSIZ     0x04 /* Rx FIFO size = 32 */
+
+#define TXFLV 0x06 /* Tx FIFO level */
+#define RXFLV 0x07 /* Rx FIFO level */
+
+/* Bank 3 */
+#define MID 0x00
+
+/* Bank 4 */
+#define TMRL 0x00 /* Timer low byte */
+#define TMRH 0x01 /* Timer high byte */
+#define IRCR1 0x02 /* Infrared control register 1 */
+#define IRCR1_TMR_EN 0x01 /* Timer enable */
+
+#define TFRLL 0x04
+#define TFRLH 0x05
+#define RFRLL 0x06
+#define RFRLH 0x07
+
+/* Bank 5 */
+#define IRCR2 0x04 /* Infrared control register 2 */
+#define IRCR2_MDRS 0x04 /* MIR data rate select */
+#define IRCR2_FEND_MD 0x20 /* */
+
+#define FRM_ST 0x05 /* Frame status FIFO */
+#define FRM_ST_VLD 0x80 /* Frame status FIFO data valid */
+#define FRM_ST_ERR_MSK 0x5f
+#define FRM_ST_LOST_FR 0x40 /* Frame lost */
+#define FRM_ST_MAX_LEN 0x10 /* Max frame len exceeded */
+#define FRM_ST_PHY_ERR 0x08 /* Physical layer error */
+#define FRM_ST_BAD_CRC 0x04
+#define FRM_ST_OVR1 0x02 /* Rx FIFO overrun */
+#define FRM_ST_OVR2 0x01 /* Frame status FIFO overrun */
+
+#define RFLFL 0x06
+#define RFLFH 0x07
+
+/* Bank 6 */
+#define IR_CFG2 0x00
+#define IR_CFG2_DIS_CRC 0x02
+
+/* Bank 7 */
+#define IRM_CR 0x07 /* Infrared module control register */
+#define IRM_CR_IRX_MSL 0x40
+#define IRM_CR_AF_MNT 0x80 /* Automatic format */
+
+/* NSC chip information */
+struct nsc_chip {
+ char *name; /* Name of chipset */
+ int cfg[3]; /* Config registers */
+ u_int8_t cid_index; /* Chip identification index reg */
+ u_int8_t cid_value; /* Chip identification expected value */
+ u_int8_t cid_mask; /* Chip identification revision mask */
+
+ /* Functions for probing and initializing the specific chip */
+ int (*probe)(struct nsc_chip *chip, chipio_t *info);
+ int (*init)(struct nsc_chip *chip, chipio_t *info);
+};
+typedef struct nsc_chip nsc_chip_t;
+
+/* For storing entries in the status FIFO */
+struct st_fifo_entry {
+ int status;
+ int len;
+};
+
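+/* Maximum number of frames tracked in the Tx queue and in the Rx status FIFO */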
+#define MAX_TX_WINDOW 7
+#define MAX_RX_WINDOW 7
+
+struct st_fifo {
+ struct st_fifo_entry entries[MAX_RX_WINDOW];
+	int pending_bytes;	/* Bytes covered by entries not yet processed */
+	int head;		/* Next entry to process */
+	int tail;		/* Next free entry */
+	int len;		/* Number of valid entries */
+};
+
+struct frame_cb {
+ void *start; /* Start of frame in DMA mem */
+ int len; /* Length of frame in DMA mem */
+};
+
+struct tx_fifo {
+ struct frame_cb queue[MAX_TX_WINDOW]; /* Info about frames in queue */
+ int ptr; /* Currently being sent */
+ int len; /* Length of queue */
+ int free; /* Next free slot */
+ void *tail; /* Next free start in DMA mem */
+};
+
+/* Private data for each instance */
+struct nsc_ircc_cb {
+ struct st_fifo st_fifo; /* Info about received frames */
+ struct tx_fifo tx_fifo; /* Info about frames to be transmitted */
+
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+
+	struct irlap_cb *irlap;        /* The link layer we are bound to */
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ chipio_t io; /* IrDA controller information */
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+	dma_addr_t tx_buff_dma;        /* DMA handle for tx_buff */
+	dma_addr_t rx_buff_dma;        /* DMA handle for rx_buff */
+
+ __u8 ier; /* Interrupt enable register */
+
+	ktime_t stamp;                 /* Timestamp of the last received frame */
+
+ spinlock_t lock; /* For serializing operations */
+
+	__u32 new_speed;               /* Speed to switch to once the Tx queue has drained */
+ int index; /* Instance index */
+
+ struct platform_device *pldev;
+};
+
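+/* Select one of the register banks by writing to the bank select register */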
+static inline void switch_bank(int iobase, int bank)
+{
+ outb(bank, iobase+BSR);
+}
+
+#endif /* NSC_IRCC_H */
diff --git a/drivers/staging/irda/drivers/old_belkin-sir.c b/drivers/staging/irda/drivers/old_belkin-sir.c
new file mode 100644
index 000000000000..a7c2e990ae69
--- /dev/null
+++ b/drivers/staging/irda/drivers/old_belkin-sir.c
@@ -0,0 +1,146 @@
+/*********************************************************************
+ *
+ * Filename: old_belkin.c
+ * Version: 1.1
+ * Description: Driver for the Belkin (old) SmartBeam dongle
+ * Status: Experimental...
+ * Author: Jean Tourrilhes <jt@hpl.hp.com>
+ * Created at: 22/11/99
+ * Modified at: Fri Dec 17 09:13:32 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Jean Tourrilhes, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+// #include <net/irda/irda_device.h>
+
+#include "sir-dev.h"
+
+/*
+ * Belkin is selling a dongle called the SmartBeam.
+ * In fact, there are two hardware versions of this dongle, of course with
+ * the same name and looking exactly the same (grrr...).
+ * I guess that I've got the old one, because inside I don't have
+ * a jumper for IrDA/ASK...
+ *
+ * As far as I can tell from the info on their web site, the old dongle
+ * supports only 9600 b/s, which makes our life much simpler as far as
+ * the driver is concerned, but you might not like it very much ;-)
+ * The new SmartBeam does 115 kb/s, and I've not tested it...
+ *
+ * Belkin claims that the correct driver for the old dongle (in Windows)
+ * is the generic Parallax 9500a driver, but the Linux LiteLink driver
+ * fails for me (probably because Linux-IrDA doesn't do rate fallback),
+ * so I created this really dumb driver...
+ *
+ * In fact, this driver doesn't do much. The only thing it does is
+ * prevent Linux-IrDA from using any speed other than 9600 b/s ;-) This
+ * driver is called "old_belkin" so that when the new SmartBeam is supported
+ * its driver can be called "belkin" instead of "new_belkin".
+ *
+ * Note : this driver was written without any info/help from Belkin,
+ * so a lot of info here might be totally wrong. Blame me ;-)
+ */
+
+static int old_belkin_open(struct sir_dev *dev);
+static int old_belkin_close(struct sir_dev *dev);
+static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed);
+static int old_belkin_reset(struct sir_dev *dev);
+
+static struct dongle_driver old_belkin = {
+ .owner = THIS_MODULE,
+ .driver_name = "Old Belkin SmartBeam",
+ .type = IRDA_OLD_BELKIN_DONGLE,
+ .open = old_belkin_open,
+ .close = old_belkin_close,
+ .reset = old_belkin_reset,
+ .set_speed = old_belkin_change_speed,
+};
+
+static int __init old_belkin_sir_init(void)
+{
+ return irda_register_dongle(&old_belkin);
+}
+
+static void __exit old_belkin_sir_cleanup(void)
+{
+ irda_unregister_dongle(&old_belkin);
+}
+
+static int old_belkin_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ /* Power on dongle */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Not too fast, please... */
+ qos->baud_rate.bits &= IR_9600;
+ /* Needs at least 10 ms (totally wild guess, can do probably better) */
+ qos->min_turn_time.bits = 0x01;
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int old_belkin_close(struct sir_dev *dev)
+{
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function old_belkin_change_speed (task)
+ *
+ * With only one speed available, not much to do...
+ */
+static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ dev->speed = 9600;
+	return (speed == dev->speed) ? 0 : -EINVAL;
+}
+
+/*
+ * Function old_belkin_reset (task)
+ *
+ * Reset the Old-Belkin type dongle.
+ *
+ */
+static int old_belkin_reset(struct sir_dev *dev)
+{
+	/* This dongle's speed "defaults" to 9600 bps ;-) */
+ dev->speed = 9600;
+
+ return 0;
+}
+
+MODULE_AUTHOR("Jean Tourrilhes <jt@hpl.hp.com>");
+MODULE_DESCRIPTION("Belkin (old) SmartBeam dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-7"); /* IRDA_OLD_BELKIN_DONGLE */
+
+module_init(old_belkin_sir_init);
+module_exit(old_belkin_sir_cleanup);
diff --git a/drivers/staging/irda/drivers/pxaficp_ir.c b/drivers/staging/irda/drivers/pxaficp_ir.c
new file mode 100644
index 000000000000..1dba16bc7f8d
--- /dev/null
+++ b/drivers/staging/irda/drivers/pxaficp_ir.c
@@ -0,0 +1,1076 @@
+/*
+ * linux/drivers/net/irda/pxaficp_ir.c
+ *
+ * Based on sa1100_ir.c by Russell King
+ *
+ * Changes copyright (C) 2003-2005 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma/pxa-dma.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/sched/clock.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irmod.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+
+#include <linux/platform_data/irda-pxaficp.h>
+#undef __REG
+#define __REG(x) ((x) & 0xffff)
+#include <mach/regs-uart.h>
+
+#define ICCR0 0x0000 /* ICP Control Register 0 */
+#define ICCR1 0x0004 /* ICP Control Register 1 */
+#define ICCR2 0x0008 /* ICP Control Register 2 */
+#define ICDR 0x000c /* ICP Data Register */
+#define ICSR0 0x0014 /* ICP Status Register 0 */
+#define ICSR1 0x0018 /* ICP Status Register 1 */
+
+#define ICCR0_AME (1 << 7) /* Address match enable */
+#define ICCR0_TIE (1 << 6) /* Transmit FIFO interrupt enable */
+#define ICCR0_RIE (1 << 5) /* Receive FIFO interrupt enable */
+#define ICCR0_RXE (1 << 4) /* Receive enable */
+#define ICCR0_TXE (1 << 3) /* Transmit enable */
+#define ICCR0_TUS (1 << 2) /* Transmit FIFO underrun select */
+#define ICCR0_LBM (1 << 1) /* Loopback mode */
+#define ICCR0_ITR (1 << 0) /* IrDA transmission */
+
+#define ICCR2_RXP (1 << 3) /* Receive Pin Polarity select */
+#define ICCR2_TXP (1 << 2) /* Transmit Pin Polarity select */
+#define ICCR2_TRIG (3 << 0) /* Receive FIFO Trigger threshold */
+#define ICCR2_TRIG_8 (0 << 0) /* >= 8 bytes */
+#define ICCR2_TRIG_16 (1 << 0) /* >= 16 bytes */
+#define ICCR2_TRIG_32 (2 << 0) /* >= 32 bytes */
+
+#define ICSR0_EOC (1 << 6) /* DMA End of Descriptor Chain */
+#define ICSR0_FRE (1 << 5) /* Framing error */
+#define ICSR0_RFS (1 << 4) /* Receive FIFO service request */
+#define ICSR0_TFS       (1 << 3)        /* Transmit FIFO service request */
+#define ICSR0_RAB (1 << 2) /* Receiver abort */
+#define ICSR0_TUR       (1 << 1)        /* Transmit FIFO underrun */
+#define ICSR0_EIF (1 << 0) /* End/Error in FIFO */
+
+#define ICSR1_ROR       (1 << 6)        /* Receive FIFO overrun  */
+#define ICSR1_CRE (1 << 5) /* CRC error */
+#define ICSR1_EOF (1 << 4) /* End of frame */
+#define ICSR1_TNF (1 << 3) /* Transmit FIFO not full */
+#define ICSR1_RNE (1 << 2) /* Receive FIFO not empty */
+#define ICSR1_TBY       (1 << 1)        /* Transmitter busy flag */
+#define ICSR1_RSY       (1 << 0)        /* Receiver synchronized flag */
+
+#define IrSR_RXPL_NEG_IS_ZERO (1<<4)
+#define IrSR_RXPL_POS_IS_ZERO 0x0
+#define IrSR_TXPL_NEG_IS_ZERO (1<<3)
+#define IrSR_TXPL_POS_IS_ZERO 0x0
+#define IrSR_XMODE_PULSE_1_6 (1<<2)
+#define IrSR_XMODE_PULSE_3_16 0x0
+#define IrSR_RCVEIR_IR_MODE (1<<1)
+#define IrSR_RCVEIR_UART_MODE 0x0
+#define IrSR_XMITIR_IR_MODE (1<<0)
+#define IrSR_XMITIR_UART_MODE 0x0
+
+#define IrSR_IR_RECEIVE_ON (\
+ IrSR_RXPL_NEG_IS_ZERO | \
+ IrSR_TXPL_POS_IS_ZERO | \
+ IrSR_XMODE_PULSE_3_16 | \
+ IrSR_RCVEIR_IR_MODE | \
+ IrSR_XMITIR_UART_MODE)
+
+#define IrSR_IR_TRANSMIT_ON (\
+ IrSR_RXPL_NEG_IS_ZERO | \
+ IrSR_TXPL_POS_IS_ZERO | \
+ IrSR_XMODE_PULSE_3_16 | \
+ IrSR_RCVEIR_UART_MODE | \
+ IrSR_XMITIR_IR_MODE)
+
+/* macros for registers read/write */
+#define ficp_writel(irda, val, off) \
+ do { \
+ dev_vdbg(irda->dev, \
+ "%s():%d ficp_writel(0x%x, %s)\n", \
+ __func__, __LINE__, (val), #off); \
+ writel_relaxed((val), (irda)->irda_base + (off)); \
+ } while (0)
+
+#define ficp_readl(irda, off) \
+ ({ \
+ unsigned int _v; \
+ _v = readl_relaxed((irda)->irda_base + (off)); \
+ dev_vdbg(irda->dev, \
+ "%s():%d ficp_readl(%s): 0x%x\n", \
+ __func__, __LINE__, #off, _v); \
+ _v; \
+ })
+
+#define stuart_writel(irda, val, off) \
+ do { \
+ dev_vdbg(irda->dev, \
+ "%s():%d stuart_writel(0x%x, %s)\n", \
+ __func__, __LINE__, (val), #off); \
+ writel_relaxed((val), (irda)->stuart_base + (off)); \
+ } while (0)
+
+#define stuart_readl(irda, off) \
+ ({ \
+ unsigned int _v; \
+ _v = readl_relaxed((irda)->stuart_base + (off)); \
+ dev_vdbg(irda->dev, \
+ "%s():%d stuart_readl(%s): 0x%x\n", \
+ __func__, __LINE__, #off, _v); \
+ _v; \
+ })
+
+struct pxa_irda {
+	int			speed;		/* Current link speed */
+	int			newspeed;	/* Speed to switch to after the current transmission */
+	unsigned long long	last_clk;	/* sched_clock() timestamp of the last Tx/Rx activity */
+
+ void __iomem *stuart_base;
+ void __iomem *irda_base;
+ unsigned char *dma_rx_buff;
+ unsigned char *dma_tx_buff;
+ dma_addr_t dma_rx_buff_phy;
+ dma_addr_t dma_tx_buff_phy;
+ unsigned int dma_tx_buff_len;
+ struct dma_chan *txdma;
+ struct dma_chan *rxdma;
+ dma_cookie_t rx_cookie;
+ dma_cookie_t tx_cookie;
+ int drcmr_rx;
+ int drcmr_tx;
+
+ int uart_irq;
+ int icp_irq;
+
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+
+ struct device *dev;
+ struct pxaficp_platform_data *pdata;
+ struct clk *fir_clk;
+ struct clk *sir_clk;
+ struct clk *cur_clk;
+};
+
+static int pxa_irda_set_speed(struct pxa_irda *si, int speed);
+
+static inline void pxa_irda_disable_clk(struct pxa_irda *si)
+{
+ if (si->cur_clk)
+ clk_disable_unprepare(si->cur_clk);
+ si->cur_clk = NULL;
+}
+
+static inline void pxa_irda_enable_firclk(struct pxa_irda *si)
+{
+ si->cur_clk = si->fir_clk;
+ clk_prepare_enable(si->fir_clk);
+}
+
+static inline void pxa_irda_enable_sirclk(struct pxa_irda *si)
+{
+ si->cur_clk = si->sir_clk;
+ clk_prepare_enable(si->sir_clk);
+}
+
+
+#define IS_FIR(si)		((si)->speed >= 4000000)	/* 4 Mb/s and up is handled by the FICP (FIR) block */
+#define IRDA_FRAME_SIZE_LIMIT 2047
+
+static void pxa_irda_fir_dma_rx_irq(void *data);
+static void pxa_irda_fir_dma_tx_irq(void *data);
+
+static inline void pxa_irda_fir_dma_rx_start(struct pxa_irda *si)
+{
+ struct dma_async_tx_descriptor *tx;
+
+ tx = dmaengine_prep_slave_single(si->rxdma, si->dma_rx_buff_phy,
+ IRDA_FRAME_SIZE_LIMIT, DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+		dev_err(si->dev, "prep_slave_single() failed\n");
+ return;
+ }
+ tx->callback = pxa_irda_fir_dma_rx_irq;
+ tx->callback_param = si;
+ si->rx_cookie = dmaengine_submit(tx);
+ dma_async_issue_pending(si->rxdma);
+}
+
+static inline void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
+{
+ struct dma_async_tx_descriptor *tx;
+
+ tx = dmaengine_prep_slave_single(si->txdma, si->dma_tx_buff_phy,
+ si->dma_tx_buff_len, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+		dev_err(si->dev, "prep_slave_single() failed\n");
+ return;
+ }
+ tx->callback = pxa_irda_fir_dma_tx_irq;
+ tx->callback_param = si;
+ si->tx_cookie = dmaengine_submit(tx);
+	dma_async_issue_pending(si->txdma);
+}
+
+/*
+ * Set the IrDA communications mode.
+ */
+static void pxa_irda_set_mode(struct pxa_irda *si, int mode)
+{
+ if (si->pdata->transceiver_mode)
+ si->pdata->transceiver_mode(si->dev, mode);
+ else {
+ if (gpio_is_valid(si->pdata->gpio_pwdown))
+ gpio_set_value(si->pdata->gpio_pwdown,
+ !(mode & IR_OFF) ^
+ !si->pdata->gpio_pwdown_inverted);
+ pxa2xx_transceiver_mode(si->dev, mode);
+ }
+}
+
+/*
+ * Set the IrDA communications speed.
+ */
+static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
+{
+ unsigned long flags;
+ unsigned int divisor;
+
+ switch (speed) {
+ case 9600: case 19200: case 38400:
+ case 57600: case 115200:
+
+ /* refer to PXA250/210 Developer's Manual 10-7 */
+ /* BaudRate = 14.7456 MHz / (16*Divisor) */
+ divisor = 14745600 / (16 * speed);
+
+ local_irq_save(flags);
+
+ if (IS_FIR(si)) {
+ /* stop RX DMA */
+ dmaengine_terminate_all(si->rxdma);
+ /* disable FICP */
+ ficp_writel(si, 0, ICCR0);
+ pxa_irda_disable_clk(si);
+
+ /* set board transceiver to SIR mode */
+ pxa_irda_set_mode(si, IR_SIRMODE);
+
+ /* enable the STUART clock */
+ pxa_irda_enable_sirclk(si);
+ }
+
+ /* disable STUART first */
+ stuart_writel(si, 0, STIER);
+
+ /* access DLL & DLH */
+ stuart_writel(si, stuart_readl(si, STLCR) | LCR_DLAB, STLCR);
+ stuart_writel(si, divisor & 0xff, STDLL);
+ stuart_writel(si, divisor >> 8, STDLH);
+ stuart_writel(si, stuart_readl(si, STLCR) & ~LCR_DLAB, STLCR);
+
+ si->speed = speed;
+ stuart_writel(si, IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6,
+ STISR);
+ stuart_writel(si, IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE,
+ STIER);
+
+ local_irq_restore(flags);
+ break;
+
+ case 4000000:
+ local_irq_save(flags);
+
+ /* disable STUART */
+ stuart_writel(si, 0, STIER);
+ stuart_writel(si, 0, STISR);
+ pxa_irda_disable_clk(si);
+
+ /* disable FICP first */
+ ficp_writel(si, 0, ICCR0);
+
+ /* set board transceiver to FIR mode */
+ pxa_irda_set_mode(si, IR_FIRMODE);
+
+ /* enable the FICP clock */
+ pxa_irda_enable_firclk(si);
+
+ si->speed = speed;
+ pxa_irda_fir_dma_rx_start(si);
+ ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);
+
+ local_irq_restore(flags);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* SIR interrupt service routine. */
+static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct pxa_irda *si = netdev_priv(dev);
+ int iir, lsr, data;
+
+ iir = stuart_readl(si, STIIR);
+
+ switch (iir & 0x0F) {
+ case 0x06: /* Receiver Line Status */
+ lsr = stuart_readl(si, STLSR);
+ while (lsr & LSR_FIFOE) {
+ data = stuart_readl(si, STRBR);
+ if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
+ printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
+ dev->stats.rx_errors++;
+ if (lsr & LSR_FE)
+ dev->stats.rx_frame_errors++;
+ if (lsr & LSR_OE)
+ dev->stats.rx_fifo_errors++;
+ } else {
+ dev->stats.rx_bytes++;
+ async_unwrap_char(dev, &dev->stats,
+ &si->rx_buff, data);
+ }
+ lsr = stuart_readl(si, STLSR);
+ }
+ si->last_clk = sched_clock();
+ break;
+
+ case 0x04: /* Received Data Available */
+		/* fall through */
+
+ case 0x0C: /* Character Timeout Indication */
+ do {
+ dev->stats.rx_bytes++;
+ async_unwrap_char(dev, &dev->stats, &si->rx_buff,
+ stuart_readl(si, STRBR));
+ } while (stuart_readl(si, STLSR) & LSR_DR);
+ si->last_clk = sched_clock();
+ break;
+
+ case 0x02: /* Transmit FIFO Data Request */
+ while ((si->tx_buff.len) &&
+ (stuart_readl(si, STLSR) & LSR_TDRQ)) {
+ stuart_writel(si, *si->tx_buff.data++, STTHR);
+ si->tx_buff.len -= 1;
+ }
+
+ if (si->tx_buff.len == 0) {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head;
+
+ /* We need to ensure that the transmitter has finished. */
+ while ((stuart_readl(si, STLSR) & LSR_TEMT) == 0)
+ cpu_relax();
+ si->last_clk = sched_clock();
+
+ /*
+ * Ok, we've finished transmitting. Now enable
+ * the receiver. Sometimes we get a receive IRQ
+ * immediately after a transmit...
+ */
+ if (si->newspeed) {
+ pxa_irda_set_speed(si, si->newspeed);
+ si->newspeed = 0;
+ } else {
+ /* enable IR Receiver, disable IR Transmitter */
+ stuart_writel(si, IrSR_IR_RECEIVE_ON |
+ IrSR_XMODE_PULSE_1_6, STISR);
+ /* enable STUART and receive interrupts */
+ stuart_writel(si, IER_UUE | IER_RLSE |
+ IER_RAVIE | IER_RTIOE, STIER);
+ }
+ /* I'm hungry! */
+ netif_wake_queue(dev);
+ }
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* FIR Receive DMA interrupt handler */
+static void pxa_irda_fir_dma_rx_irq(void *data)
+{
+ struct net_device *dev = data;
+ struct pxa_irda *si = netdev_priv(dev);
+
+ dmaengine_terminate_all(si->rxdma);
+ netdev_dbg(dev, "pxa_ir: fir rx dma bus error\n");
+}
+
+/* FIR Transmit DMA interrupt handler */
+static void pxa_irda_fir_dma_tx_irq(void *data)
+{
+ struct net_device *dev = data;
+ struct pxa_irda *si = netdev_priv(dev);
+
+ dmaengine_terminate_all(si->txdma);
+ if (dmaengine_tx_status(si->txdma, si->tx_cookie, NULL) == DMA_ERROR) {
+ dev->stats.tx_errors++;
+ } else {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += si->dma_tx_buff_len;
+ }
+
+ while (ficp_readl(si, ICSR1) & ICSR1_TBY)
+ cpu_relax();
+ si->last_clk = sched_clock();
+
+ /*
+ * HACK: It looks like the TBY bit is dropped too soon.
+ * Without this delay things break.
+ */
+ udelay(120);
+
+ if (si->newspeed) {
+ pxa_irda_set_speed(si, si->newspeed);
+ si->newspeed = 0;
+ } else {
+ int i = 64;
+
+ ficp_writel(si, 0, ICCR0);
+ pxa_irda_fir_dma_rx_start(si);
+ while ((ficp_readl(si, ICSR1) & ICSR1_RNE) && i--)
+ ficp_readl(si, ICDR);
+ ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);
+
+ if (i < 0)
+ printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");
+ }
+ netif_wake_queue(dev);
+}
+
+/* EIF(Error in FIFO/End in Frame) handler for FIR */
+static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
+{
+ unsigned int len, stat, data;
+ struct dma_tx_state state;
+
+ /* Get the current data position. */
+
+ dmaengine_tx_status(si->rxdma, si->rx_cookie, &state);
+ len = IRDA_FRAME_SIZE_LIMIT - state.residue;
+
+ do {
+ /* Read Status, and then Data. */
+ stat = ficp_readl(si, ICSR1);
+ rmb();
+ data = ficp_readl(si, ICDR);
+
+ if (stat & (ICSR1_CRE | ICSR1_ROR)) {
+ dev->stats.rx_errors++;
+ if (stat & ICSR1_CRE) {
+ printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
+ dev->stats.rx_crc_errors++;
+ }
+ if (stat & ICSR1_ROR) {
+ printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
+ dev->stats.rx_over_errors++;
+ }
+ } else {
+ si->dma_rx_buff[len++] = data;
+ }
+ /* If we hit the end of frame, there's no point in continuing. */
+ if (stat & ICSR1_EOF)
+ break;
+ } while (ficp_readl(si, ICSR0) & ICSR0_EIF);
+
+ if (stat & ICSR1_EOF) {
+ /* end of frame. */
+ struct sk_buff *skb;
+
+ if (icsr0 & ICSR0_FRE) {
+ printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
+ dev->stats.rx_dropped++;
+ return;
+ }
+
+		skb = alloc_skb(len + 1, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
+ dev->stats.rx_dropped++;
+ return;
+ }
+
+		/* Reserve one byte so the IP header ends up properly aligned */
+ skb_reserve(skb, 1);
+ skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
+ skb_put(skb, len);
+
+ /* Feed it to IrLAP */
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ }
+}
+
+/* FIR interrupt handler */
+static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct pxa_irda *si = netdev_priv(dev);
+ int icsr0, i = 64;
+
+ /* stop RX DMA */
+ dmaengine_terminate_all(si->rxdma);
+ si->last_clk = sched_clock();
+ icsr0 = ficp_readl(si, ICSR0);
+
+ if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
+ if (icsr0 & ICSR0_FRE) {
+ printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
+ dev->stats.rx_frame_errors++;
+ } else {
+ printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
+ dev->stats.rx_errors++;
+ }
+ ficp_writel(si, icsr0 & (ICSR0_FRE | ICSR0_RAB), ICSR0);
+ }
+
+ if (icsr0 & ICSR0_EIF) {
+		/* An error occurred in the FIFO, or there is an end of frame */
+ pxa_irda_fir_irq_eif(si, dev, icsr0);
+ }
+
+ ficp_writel(si, 0, ICCR0);
+ pxa_irda_fir_dma_rx_start(si);
+ while ((ficp_readl(si, ICSR1) & ICSR1_RNE) && i--)
+ ficp_readl(si, ICDR);
+ ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);
+
+ if (i < 0)
+ printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");
+
+ return IRQ_HANDLED;
+}
+
+/* hard_xmit interface of irda device */
+static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct pxa_irda *si = netdev_priv(dev);
+ int speed = irda_get_next_speed(skb);
+
+ /*
+ * Does this packet contain a request to change the interface
+ * speed? If so, remember it until we complete the transmission
+ * of this frame.
+ */
+ if (speed != si->speed && speed != -1)
+ si->newspeed = speed;
+
+ /*
+ * If this is an empty frame, we can bypass a lot.
+ */
+ if (skb->len == 0) {
+ if (si->newspeed) {
+ si->newspeed = 0;
+ pxa_irda_set_speed(si, speed);
+ }
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ netif_stop_queue(dev);
+
+ if (!IS_FIR(si)) {
+ si->tx_buff.data = si->tx_buff.head;
+ si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);
+
+ /* Disable STUART interrupts and switch to transmit mode. */
+ stuart_writel(si, 0, STIER);
+ stuart_writel(si, IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6,
+ STISR);
+
+ /* enable STUART and transmit interrupts */
+ stuart_writel(si, IER_UUE | IER_TIE, STIER);
+ } else {
+ unsigned long mtt = irda_get_mtt(skb);
+
+ si->dma_tx_buff_len = skb->len;
+ skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);
+
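+		/* Honour the peer's minimum turnaround time before starting the transmission */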
+ if (mtt)
+ while ((sched_clock() - si->last_clk) * 1000 < mtt)
+ cpu_relax();
+
+ /* stop RX DMA, disable FICP */
+ dmaengine_terminate_all(si->rxdma);
+ ficp_writel(si, 0, ICCR0);
+
+ pxa_irda_fir_dma_tx_start(si);
+ ficp_writel(si, ICCR0_ITR | ICCR0_TXE, ICCR0);
+ }
+
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
+{
+ struct if_irda_req *rq = (struct if_irda_req *)ifreq;
+ struct pxa_irda *si = netdev_priv(dev);
+ int ret;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH:
+ ret = -EPERM;
+ if (capable(CAP_NET_ADMIN)) {
+ /*
+ * We are unable to set the speed if the
+ * device is not running.
+ */
+ if (netif_running(dev)) {
+ ret = pxa_irda_set_speed(si,
+ rq->ifr_baudrate);
+ } else {
+ printk(KERN_INFO "pxa_ir: SIOCSBANDWIDTH: !netif_running\n");
+ ret = 0;
+ }
+ }
+ break;
+
+ case SIOCSMEDIABUSY:
+ ret = -EPERM;
+ if (capable(CAP_NET_ADMIN)) {
+ irda_device_set_media_busy(dev, TRUE);
+ ret = 0;
+ }
+ break;
+
+ case SIOCGRECEIVING:
+ ret = 0;
+ rq->ifr_receiving = IS_FIR(si) ? 0
+ : si->rx_buff.state != OUTSIDE_FRAME;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static void pxa_irda_startup(struct pxa_irda *si)
+{
+ /* Disable STUART interrupts */
+ stuart_writel(si, 0, STIER);
+ /* enable STUART interrupt to the processor */
+ stuart_writel(si, MCR_OUT2, STMCR);
+ /* configure SIR frame format: StartBit - Data 7 ... Data 0 - Stop Bit */
+ stuart_writel(si, LCR_WLS0 | LCR_WLS1, STLCR);
+ /* enable FIFO, we use FIFO to improve performance */
+ stuart_writel(si, FCR_TRFIFOE | FCR_ITL_32, STFCR);
+
+ /* disable FICP */
+ ficp_writel(si, 0, ICCR0);
+ /* configure FICP ICCR2 */
+ ficp_writel(si, ICCR2_TXP | ICCR2_TRIG_32, ICCR2);
+
+ /* force SIR reinitialization */
+ si->speed = 4000000;
+ pxa_irda_set_speed(si, 9600);
+
+ printk(KERN_DEBUG "pxa_ir: irda startup\n");
+}
+
+static void pxa_irda_shutdown(struct pxa_irda *si)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* disable STUART and interrupt */
+ stuart_writel(si, 0, STIER);
+ /* disable STUART SIR mode */
+ stuart_writel(si, 0, STISR);
+
+ /* disable DMA */
+ dmaengine_terminate_all(si->rxdma);
+ dmaengine_terminate_all(si->txdma);
+ /* disable FICP */
+ ficp_writel(si, 0, ICCR0);
+
+ /* disable the STUART or FICP clocks */
+ pxa_irda_disable_clk(si);
+
+ local_irq_restore(flags);
+
+ /* power off board transceiver */
+ pxa_irda_set_mode(si, IR_OFF);
+
+ printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
+}
+
+static int pxa_irda_start(struct net_device *dev)
+{
+ struct pxa_irda *si = netdev_priv(dev);
+ dma_cap_mask_t mask;
+ struct dma_slave_config config;
+ struct pxad_param param;
+ int err;
+
+ si->speed = 9600;
+
+ err = request_irq(si->uart_irq, pxa_irda_sir_irq, 0, dev->name, dev);
+ if (err)
+ goto err_irq1;
+
+ err = request_irq(si->icp_irq, pxa_irda_fir_irq, 0, dev->name, dev);
+ if (err)
+ goto err_irq2;
+
+ /*
+ * The interrupt must remain disabled for now.
+ */
+ disable_irq(si->uart_irq);
+ disable_irq(si->icp_irq);
+
+ err = -EBUSY;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ param.prio = PXAD_PRIO_LOWEST;
+
+ memset(&config, 0, sizeof(config));
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ config.src_addr = (dma_addr_t)si->irda_base + ICDR;
+ config.dst_addr = (dma_addr_t)si->irda_base + ICDR;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
+
+ param.drcmr = si->drcmr_rx;
+ si->rxdma = dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param, &dev->dev, "rx");
+ if (!si->rxdma)
+ goto err_rx_dma;
+
+ param.drcmr = si->drcmr_tx;
+ si->txdma = dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param, &dev->dev, "tx");
+ if (!si->txdma)
+ goto err_tx_dma;
+
+ err = dmaengine_slave_config(si->rxdma, &config);
+ if (err)
+ goto err_dma_rx_buff;
+ err = dmaengine_slave_config(si->txdma, &config);
+ if (err)
+ goto err_dma_rx_buff;
+
+ err = -ENOMEM;
+ si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
+ &si->dma_rx_buff_phy, GFP_KERNEL);
+ if (!si->dma_rx_buff)
+ goto err_dma_rx_buff;
+
+ si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
+ &si->dma_tx_buff_phy, GFP_KERNEL);
+ if (!si->dma_tx_buff)
+ goto err_dma_tx_buff;
+
+ /* Setup the serial port for the initial speed. */
+ pxa_irda_startup(si);
+
+ /*
+ * Open a new IrLAP layer instance.
+ */
+ si->irlap = irlap_open(dev, &si->qos, "pxa");
+ err = -ENOMEM;
+ if (!si->irlap)
+ goto err_irlap;
+
+ /*
+ * Now enable the interrupt and start the queue
+ */
+ enable_irq(si->uart_irq);
+ enable_irq(si->icp_irq);
+ netif_start_queue(dev);
+
+ printk(KERN_DEBUG "pxa_ir: irda driver opened\n");
+
+ return 0;
+
+err_irlap:
+ pxa_irda_shutdown(si);
+ dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
+err_dma_tx_buff:
+ dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
+err_dma_rx_buff:
+ dma_release_channel(si->txdma);
+err_tx_dma:
+ dma_release_channel(si->rxdma);
+err_rx_dma:
+ free_irq(si->icp_irq, dev);
+err_irq2:
+ free_irq(si->uart_irq, dev);
+err_irq1:
+
+ return err;
+}
+
+static int pxa_irda_stop(struct net_device *dev)
+{
+ struct pxa_irda *si = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ pxa_irda_shutdown(si);
+
+ /* Stop IrLAP */
+ if (si->irlap) {
+ irlap_close(si->irlap);
+ si->irlap = NULL;
+ }
+
+ free_irq(si->uart_irq, dev);
+ free_irq(si->icp_irq, dev);
+
+ dmaengine_terminate_all(si->rxdma);
+ dmaengine_terminate_all(si->txdma);
+ dma_release_channel(si->rxdma);
+ dma_release_channel(si->txdma);
+
+	if (si->dma_rx_buff)
+		dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
+	if (si->dma_tx_buff)
+		dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
+
+ printk(KERN_DEBUG "pxa_ir: irda driver closed\n");
+ return 0;
+}
+
+static int pxa_irda_suspend(struct platform_device *_dev, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(_dev);
+ struct pxa_irda *si;
+
+ if (dev && netif_running(dev)) {
+ si = netdev_priv(dev);
+ netif_device_detach(dev);
+ pxa_irda_shutdown(si);
+ }
+
+ return 0;
+}
+
+static int pxa_irda_resume(struct platform_device *_dev)
+{
+ struct net_device *dev = platform_get_drvdata(_dev);
+ struct pxa_irda *si;
+
+ if (dev && netif_running(dev)) {
+ si = netdev_priv(dev);
+ pxa_irda_startup(si);
+ netif_device_attach(dev);
+ netif_wake_queue(dev);
+ }
+
+ return 0;
+}
+
+
+static int pxa_irda_init_iobuf(iobuff_t *io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
+ if (io->head != NULL) {
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+ }
+ return io->head ? 0 : -ENOMEM;
+}
+
+static const struct net_device_ops pxa_irda_netdev_ops = {
+ .ndo_open = pxa_irda_start,
+ .ndo_stop = pxa_irda_stop,
+ .ndo_start_xmit = pxa_irda_hard_xmit,
+ .ndo_do_ioctl = pxa_irda_ioctl,
+};
+
+static int pxa_irda_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct resource *res;
+ struct pxa_irda *si;
+ void __iomem *ficp, *stuart;
+ unsigned int baudrate_mask;
+ int err;
+
+ if (!pdev->dev.platform_data)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ficp = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ficp)) {
+ dev_err(&pdev->dev, "resource ficp not defined\n");
+ return PTR_ERR(ficp);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ stuart = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(stuart)) {
+ dev_err(&pdev->dev, "resource stuart not defined\n");
+ return PTR_ERR(stuart);
+ }
+
+ dev = alloc_irdadev(sizeof(struct pxa_irda));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_mem_1;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ si = netdev_priv(dev);
+ si->dev = &pdev->dev;
+ si->pdata = pdev->dev.platform_data;
+
+ si->irda_base = ficp;
+ si->stuart_base = stuart;
+ si->uart_irq = platform_get_irq(pdev, 0);
+ si->icp_irq = platform_get_irq(pdev, 1);
+
+ si->sir_clk = devm_clk_get(&pdev->dev, "UARTCLK");
+ si->fir_clk = devm_clk_get(&pdev->dev, "FICPCLK");
+ if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) {
+ err = PTR_ERR(IS_ERR(si->sir_clk) ? si->sir_clk : si->fir_clk);
+ goto err_mem_4;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (res)
+ si->drcmr_rx = res->start;
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (res)
+ si->drcmr_tx = res->start;
+
+ /*
+ * Initialise the SIR buffers
+ */
+ err = pxa_irda_init_iobuf(&si->rx_buff, 14384);
+ if (err)
+ goto err_mem_4;
+ err = pxa_irda_init_iobuf(&si->tx_buff, 4000);
+ if (err)
+ goto err_mem_5;
+
+ if (gpio_is_valid(si->pdata->gpio_pwdown)) {
+ err = gpio_request(si->pdata->gpio_pwdown, "IrDA switch");
+ if (err)
+ goto err_startup;
+ err = gpio_direction_output(si->pdata->gpio_pwdown,
+ !si->pdata->gpio_pwdown_inverted);
+ if (err) {
+ gpio_free(si->pdata->gpio_pwdown);
+ goto err_startup;
+ }
+ }
+
+ if (si->pdata->startup) {
+ err = si->pdata->startup(si->dev);
+ if (err)
+ goto err_startup;
+ }
+
+ if (gpio_is_valid(si->pdata->gpio_pwdown) && si->pdata->startup)
+ dev_warn(si->dev, "gpio_pwdown and startup() both defined!\n");
+
+ dev->netdev_ops = &pxa_irda_netdev_ops;
+
+ irda_init_max_qos_capabilies(&si->qos);
+
+ baudrate_mask = 0;
+ if (si->pdata->transceiver_cap & IR_SIRMODE)
+ baudrate_mask |= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ if (si->pdata->transceiver_cap & IR_FIRMODE)
+ baudrate_mask |= IR_4000000 << 8;
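+	/*
+	 * Note: IR_4000000 is assumed to live in the second byte of the
+	 * 16-bit baud-rate bit field, hence the << 8 above.
+	 */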
+
+ si->qos.baud_rate.bits &= baudrate_mask;
+ si->qos.min_turn_time.bits = 7; /* 1ms or more */
+
+ irda_qos_bits_to_value(&si->qos);
+
+ err = register_netdev(dev);
+
+ if (err == 0)
+ platform_set_drvdata(pdev, dev);
+
+ if (err) {
+ if (si->pdata->shutdown)
+ si->pdata->shutdown(si->dev);
+err_startup:
+ kfree(si->tx_buff.head);
+err_mem_5:
+ kfree(si->rx_buff.head);
+err_mem_4:
+ free_netdev(dev);
+ }
+err_mem_1:
+ return err;
+}
+
+static int pxa_irda_remove(struct platform_device *_dev)
+{
+ struct net_device *dev = platform_get_drvdata(_dev);
+
+ if (dev) {
+ struct pxa_irda *si = netdev_priv(dev);
+ unregister_netdev(dev);
+ if (gpio_is_valid(si->pdata->gpio_pwdown))
+ gpio_free(si->pdata->gpio_pwdown);
+ if (si->pdata->shutdown)
+ si->pdata->shutdown(si->dev);
+ kfree(si->tx_buff.head);
+ kfree(si->rx_buff.head);
+ free_netdev(dev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver pxa_ir_driver = {
+ .driver = {
+ .name = "pxa2xx-ir",
+ },
+ .probe = pxa_irda_probe,
+ .remove = pxa_irda_remove,
+ .suspend = pxa_irda_suspend,
+ .resume = pxa_irda_resume,
+};
+
+module_platform_driver(pxa_ir_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa2xx-ir");
diff --git a/drivers/staging/irda/drivers/sa1100_ir.c b/drivers/staging/irda/drivers/sa1100_ir.c
new file mode 100644
index 000000000000..b6e44ff4e373
--- /dev/null
+++ b/drivers/staging/irda/drivers/sa1100_ir.c
@@ -0,0 +1,1150 @@
+/*
+ * linux/drivers/net/irda/sa1100_ir.c
+ *
+ * Copyright (C) 2000-2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Infra-red driver for the StrongARM SA1100 embedded microprocessor
+ *
+ * Note that we don't have to worry about the SA1111's DMA bugs in here,
+ * so we use the straightforward dma_map_* functions with a null pointer.
+ *
+ * This driver takes three module parameters (which may also be given on
+ * the kernel command line as sa1100_ir.<param>=<value>):
+ *	max_rate	- set the maximum baud rate
+ *	power_level	- set the transmitter power level (1 to 3)
+ *	tx_lpm		- set transmit low power mode (0|1)
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/sa11x0-dma.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+
+#include <mach/hardware.h>
+#include <linux/platform_data/irda-sa11x0.h>
+
+static int power_level = 3;
+static int tx_lpm;
+static int max_rate = 4000000;
+
+struct sa1100_buf {
+ struct device *dev;
+ struct sk_buff *skb;
+ struct scatterlist sg;
+ struct dma_chan *chan;
+ dma_cookie_t cookie;
+};
+
+struct sa1100_irda {
+ unsigned char utcr4;
+ unsigned char power;
+ unsigned char open;
+
+ int speed;
+ int newspeed;
+
+ struct sa1100_buf dma_rx;
+ struct sa1100_buf dma_tx;
+
+ struct device *dev;
+ struct irda_platform_data *pdata;
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+
+ int (*tx_start)(struct sk_buff *, struct net_device *, struct sa1100_irda *);
+ irqreturn_t (*irq)(struct net_device *, struct sa1100_irda *);
+};
+
+static int sa1100_irda_set_speed(struct sa1100_irda *, int);
+
+#define IS_FIR(si) ((si)->speed >= 4000000)
+
+#define HPSIR_MAX_RXLEN 2047
+
+static struct dma_slave_config sa1100_irda_sir_tx = {
+ .direction = DMA_TO_DEVICE,
+ .dst_addr = __PREG(Ser2UTDR),
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .dst_maxburst = 4,
+};
+
+static struct dma_slave_config sa1100_irda_fir_rx = {
+ .direction = DMA_FROM_DEVICE,
+ .src_addr = __PREG(Ser2HSDR),
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .src_maxburst = 8,
+};
+
+static struct dma_slave_config sa1100_irda_fir_tx = {
+ .direction = DMA_TO_DEVICE,
+ .dst_addr = __PREG(Ser2HSDR),
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .dst_maxburst = 8,
+};
+
+static unsigned sa1100_irda_dma_xferred(struct sa1100_buf *buf)
+{
+ struct dma_chan *chan = buf->chan;
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = chan->device->device_tx_status(chan, buf->cookie, &state);
+ if (status != DMA_PAUSED)
+ return 0;
+
+ return sg_dma_len(&buf->sg) - state.residue;
+}
+
+static int sa1100_irda_dma_request(struct device *dev, struct sa1100_buf *buf,
+ const char *name, struct dma_slave_config *cfg)
+{
+ dma_cap_mask_t m;
+ int ret;
+
+ dma_cap_zero(m);
+ dma_cap_set(DMA_SLAVE, m);
+
+ buf->chan = dma_request_channel(m, sa11x0_dma_filter_fn, (void *)name);
+ if (!buf->chan) {
+ dev_err(dev, "unable to request DMA channel for %s\n",
+ name);
+ return -ENOENT;
+ }
+
+ ret = dmaengine_slave_config(buf->chan, cfg);
+ if (ret)
+ dev_warn(dev, "DMA slave_config for %s returned %d\n",
+ name, ret);
+
+ buf->dev = buf->chan->device->dev;
+
+ return 0;
+}
+
+static void sa1100_irda_dma_start(struct sa1100_buf *buf,
+ enum dma_transfer_direction dir, dma_async_tx_callback cb, void *cb_p)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *chan = buf->chan;
+
+ desc = dmaengine_prep_slave_sg(chan, &buf->sg, 1, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (desc) {
+ desc->callback = cb;
+ desc->callback_param = cb_p;
+ buf->cookie = dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+ }
+}
+
+/*
+ * Allocate and map the receive buffer, unless it is already allocated.
+ */
+static int sa1100_irda_rx_alloc(struct sa1100_irda *si)
+{
+ if (si->dma_rx.skb)
+ return 0;
+
+ si->dma_rx.skb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC);
+ if (!si->dma_rx.skb) {
+ printk(KERN_ERR "sa1100_ir: out of memory for RX SKB\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Align any IP headers that may be contained
+ * within the frame.
+ */
+ skb_reserve(si->dma_rx.skb, 1);
+
+ sg_set_buf(&si->dma_rx.sg, si->dma_rx.skb->data, HPSIR_MAX_RXLEN);
+ if (dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE) == 0) {
+ dev_kfree_skb_any(si->dma_rx.skb);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * We want to get here as soon as possible, and get the receiver set up.
+ * We use the existing buffer.
+ */
+static void sa1100_irda_rx_dma_start(struct sa1100_irda *si)
+{
+ if (!si->dma_rx.skb) {
+ printk(KERN_ERR "sa1100_ir: rx buffer went missing\n");
+ return;
+ }
+
+ /*
+ * First empty receive FIFO
+ */
+ Ser2HSCR0 = HSCR0_HSSP;
+
+ /*
+ * Enable the DMA, receiver and receive interrupt.
+ */
+ dmaengine_terminate_all(si->dma_rx.chan);
+ sa1100_irda_dma_start(&si->dma_rx, DMA_DEV_TO_MEM, NULL, NULL);
+
+ Ser2HSCR0 = HSCR0_HSSP | HSCR0_RXE;
+}
+
+static void sa1100_irda_check_speed(struct sa1100_irda *si)
+{
+ if (si->newspeed) {
+ sa1100_irda_set_speed(si, si->newspeed);
+ si->newspeed = 0;
+ }
+}
+
+/*
+ * HP-SIR format support.
+ */
+static void sa1100_irda_sirtxdma_irq(void *id)
+{
+ struct net_device *dev = id;
+ struct sa1100_irda *si = netdev_priv(dev);
+
+ dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE);
+ dev_kfree_skb(si->dma_tx.skb);
+ si->dma_tx.skb = NULL;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += sg_dma_len(&si->dma_tx.sg);
+
+ /* We need to ensure that the transmitter has finished. */
+ do
+ rmb();
+ while (Ser2UTSR1 & UTSR1_TBY);
+
+ /*
+ * Ok, we've finished transmitting. Now enable the receiver.
+ * Sometimes we get a receive IRQ immediately after a transmit...
+ */
+ Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
+ Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
+
+ sa1100_irda_check_speed(si);
+
+ /* I'm hungry! */
+ netif_wake_queue(dev);
+}
+
+static int sa1100_irda_sir_tx_start(struct sk_buff *skb, struct net_device *dev,
+ struct sa1100_irda *si)
+{
+ si->tx_buff.data = si->tx_buff.head;
+ si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data,
+ si->tx_buff.truesize);
+
+ si->dma_tx.skb = skb;
+ sg_set_buf(&si->dma_tx.sg, si->tx_buff.data, si->tx_buff.len);
+ if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) {
+ si->dma_tx.skb = NULL;
+ netif_wake_queue(dev);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV, sa1100_irda_sirtxdma_irq, dev);
+
+ /*
+ * The mean turn-around time is enforced by XBOF padding,
+ * so we don't have to do anything special here.
+ */
+ Ser2UTCR3 = UTCR3_TXE;
+
+ return NETDEV_TX_OK;
+}
+
+static irqreturn_t sa1100_irda_sir_irq(struct net_device *dev, struct sa1100_irda *si)
+{
+ int status;
+
+ status = Ser2UTSR0;
+
+ /*
+ * Deal with any receive errors first. The bytes in error may be
+ * the only bytes in the receive FIFO, so we do this first.
+ */
+ while (status & UTSR0_EIF) {
+ int stat, data;
+
+ stat = Ser2UTSR1;
+ data = Ser2UTDR;
+
+ if (stat & (UTSR1_FRE | UTSR1_ROR)) {
+ dev->stats.rx_errors++;
+ if (stat & UTSR1_FRE)
+ dev->stats.rx_frame_errors++;
+ if (stat & UTSR1_ROR)
+ dev->stats.rx_fifo_errors++;
+ } else
+ async_unwrap_char(dev, &dev->stats, &si->rx_buff, data);
+
+ status = Ser2UTSR0;
+ }
+
+ /*
+ * We must clear certain bits.
+ */
+ Ser2UTSR0 = status & (UTSR0_RID | UTSR0_RBB | UTSR0_REB);
+
+ if (status & UTSR0_RFS) {
+ /*
+ * There are at least 4 bytes in the FIFO. Read 3 bytes
+ * and leave the rest to the block below.
+ */
+ async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
+ async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
+ async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
+ }
+
+ if (status & (UTSR0_RFS | UTSR0_RID)) {
+ /*
+ * Fifo contains more than 1 character.
+ */
+ do {
+ async_unwrap_char(dev, &dev->stats, &si->rx_buff,
+ Ser2UTDR);
+ } while (Ser2UTSR1 & UTSR1_RNE);
+
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * FIR format support.
+ */
+static void sa1100_irda_firtxdma_irq(void *id)
+{
+ struct net_device *dev = id;
+ struct sa1100_irda *si = netdev_priv(dev);
+ struct sk_buff *skb;
+
+ /*
+ * Wait for the transmission to complete. Unfortunately,
+ * the hardware doesn't give us an interrupt to indicate
+ * "end of frame".
+ */
+ do
+ rmb();
+ while (!(Ser2HSSR0 & HSSR0_TUR) || Ser2HSSR1 & HSSR1_TBY);
+
+ /*
+ * Clear the transmit underrun bit.
+ */
+ Ser2HSSR0 = HSSR0_TUR;
+
+ /*
+ * Do we need to change speed? Note that we're lazy
+ * here - we don't free the old dma_rx.skb. We don't need
+ * to allocate a buffer either.
+ */
+ sa1100_irda_check_speed(si);
+
+ /*
+ * Start reception. This disables the transmitter for
+ * us. This will be using the existing RX buffer.
+ */
+ sa1100_irda_rx_dma_start(si);
+
+ /* Account and free the packet. */
+ skb = si->dma_tx.skb;
+ if (skb) {
+ dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1,
+ DMA_TO_DEVICE);
+ dev->stats.tx_packets ++;
+ dev->stats.tx_bytes += skb->len;
+ dev_kfree_skb_irq(skb);
+ si->dma_tx.skb = NULL;
+ }
+
+ /*
+ * Make sure that the TX queue is available for sending
+ * (for retries). TX has priority over RX at all times.
+ */
+ netif_wake_queue(dev);
+}
+
+static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev,
+ struct sa1100_irda *si)
+{
+ int mtt = irda_get_mtt(skb);
+
+ si->dma_tx.skb = skb;
+ sg_set_buf(&si->dma_tx.sg, skb->data, skb->len);
+ if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) {
+ si->dma_tx.skb = NULL;
+ netif_wake_queue(dev);
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV, sa1100_irda_firtxdma_irq, dev);
+
+ /*
+	 * If we have a mean turn-around time, impose the specified
+	 * delay.  We could shorten this by timing from
+ * the point we received the packet.
+ */
+ if (mtt)
+ udelay(mtt);
+
+ Ser2HSCR0 = HSCR0_HSSP | HSCR0_TXE;
+
+ return NETDEV_TX_OK;
+}
+
+static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev)
+{
+ struct sk_buff *skb = si->dma_rx.skb;
+ unsigned int len, stat, data;
+
+ if (!skb) {
+ printk(KERN_ERR "sa1100_ir: SKB is NULL!\n");
+ return;
+ }
+
+ /*
+ * Get the current data position.
+ */
+ len = sa1100_irda_dma_xferred(&si->dma_rx);
+ if (len > HPSIR_MAX_RXLEN)
+ len = HPSIR_MAX_RXLEN;
+ dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);
+
+ do {
+ /*
+ * Read Status, and then Data.
+ */
+ stat = Ser2HSSR1;
+ rmb();
+ data = Ser2HSDR;
+
+ if (stat & (HSSR1_CRE | HSSR1_ROR)) {
+ dev->stats.rx_errors++;
+ if (stat & HSSR1_CRE)
+ dev->stats.rx_crc_errors++;
+ if (stat & HSSR1_ROR)
+ dev->stats.rx_frame_errors++;
+ } else
+ skb->data[len++] = data;
+
+ /*
+ * If we hit the end of frame, there's
+ * no point in continuing.
+ */
+ if (stat & HSSR1_EOF)
+ break;
+ } while (Ser2HSSR0 & HSSR0_EIF);
+
+ if (stat & HSSR1_EOF) {
+ si->dma_rx.skb = NULL;
+
+ skb_put(skb, len);
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+
+ /*
+ * Before we pass the buffer up, allocate a new one.
+ */
+ sa1100_irda_rx_alloc(si);
+
+ netif_rx(skb);
+ } else {
+ /*
+ * Remap the buffer - it was previously mapped, and we
+ * hope that this succeeds.
+ */
+ dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);
+ }
+}
+
+/*
+ * We only have to handle RX events here; transmit events go via the TX
+ * DMA handler.  We disable RX, process, and then restart RX.
+ */
+static irqreturn_t sa1100_irda_fir_irq(struct net_device *dev, struct sa1100_irda *si)
+{
+ /*
+ * Stop RX DMA
+ */
+ dmaengine_pause(si->dma_rx.chan);
+
+ /*
+ * Framing error - we throw away the packet completely.
+ * Clearing RXE flushes the error conditions and data
+ * from the fifo.
+ */
+ if (Ser2HSSR0 & (HSSR0_FRE | HSSR0_RAB)) {
+ dev->stats.rx_errors++;
+
+ if (Ser2HSSR0 & HSSR0_FRE)
+ dev->stats.rx_frame_errors++;
+
+ /*
+ * Clear out the DMA...
+ */
+ Ser2HSCR0 = HSCR0_HSSP;
+
+ /*
+ * Clear selected status bits now, so we
+ * don't miss them next time around.
+ */
+ Ser2HSSR0 = HSSR0_FRE | HSSR0_RAB;
+ }
+
+ /*
+	 * Deal with any receive errors.  Any of the lowest
+ * 8 bytes in the FIFO may contain an error. We must read
+ * them one by one. The "error" could even be the end of
+ * packet!
+ */
+ if (Ser2HSSR0 & HSSR0_EIF)
+ sa1100_irda_fir_error(si, dev);
+
+ /*
+ * No matter what happens, we must restart reception.
+ */
+ sa1100_irda_rx_dma_start(si);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Set the IrDA communications speed.
+ */
+static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed)
+{
+ unsigned long flags;
+ int brd, ret = -EINVAL;
+
+ switch (speed) {
+ case 9600: case 19200: case 38400:
+ case 57600: case 115200:
+ brd = 3686400 / (16 * speed) - 1;
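+		/* For illustration: 9600 baud gives brd = 23, 115200 baud gives brd = 1. */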
+
+ /* Stop the receive DMA, and configure transmit. */
+ if (IS_FIR(si)) {
+ dmaengine_terminate_all(si->dma_rx.chan);
+ dmaengine_slave_config(si->dma_tx.chan,
+ &sa1100_irda_sir_tx);
+ }
+
+ local_irq_save(flags);
+
+ Ser2UTCR3 = 0;
+ Ser2HSCR0 = HSCR0_UART;
+
+ Ser2UTCR1 = brd >> 8;
+ Ser2UTCR2 = brd;
+
+ /*
+ * Clear status register
+ */
+ Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
+ Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
+
+ if (si->pdata->set_speed)
+ si->pdata->set_speed(si->dev, speed);
+
+ si->speed = speed;
+ si->tx_start = sa1100_irda_sir_tx_start;
+ si->irq = sa1100_irda_sir_irq;
+
+ local_irq_restore(flags);
+ ret = 0;
+ break;
+
+ case 4000000:
+ if (!IS_FIR(si))
+ dmaengine_slave_config(si->dma_tx.chan,
+ &sa1100_irda_fir_tx);
+
+ local_irq_save(flags);
+
+ Ser2HSSR0 = 0xff;
+ Ser2HSCR0 = HSCR0_HSSP;
+ Ser2UTCR3 = 0;
+
+ si->speed = speed;
+ si->tx_start = sa1100_irda_fir_tx_start;
+ si->irq = sa1100_irda_fir_irq;
+
+ if (si->pdata->set_speed)
+ si->pdata->set_speed(si->dev, speed);
+
+ sa1100_irda_rx_alloc(si);
+ sa1100_irda_rx_dma_start(si);
+
+ local_irq_restore(flags);
+
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Control the power state of the IrDA transmitter.
+ * State:
+ * 0 - off
+ * 1 - short range, lowest power
+ * 2 - medium range, medium power
+ * 3 - maximum range, high power
+ *
+ * Currently, only assabet is known to support this.
+ */
+static int
+__sa1100_irda_set_power(struct sa1100_irda *si, unsigned int state)
+{
+ int ret = 0;
+ if (si->pdata->set_power)
+ ret = si->pdata->set_power(si->dev, state);
+ return ret;
+}
+
+static inline int
+sa1100_set_power(struct sa1100_irda *si, unsigned int state)
+{
+ int ret;
+
+ ret = __sa1100_irda_set_power(si, state);
+ if (ret == 0)
+ si->power = state;
+
+ return ret;
+}
+
+static irqreturn_t sa1100_irda_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct sa1100_irda *si = netdev_priv(dev);
+
+ return si->irq(dev, si);
+}
+
+static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sa1100_irda *si = netdev_priv(dev);
+ int speed = irda_get_next_speed(skb);
+
+ /*
+ * Does this packet contain a request to change the interface
+ * speed? If so, remember it until we complete the transmission
+ * of this frame.
+ */
+ if (speed != si->speed && speed != -1)
+ si->newspeed = speed;
+
+ /* If this is an empty frame, we can bypass a lot. */
+ if (skb->len == 0) {
+ sa1100_irda_check_speed(si);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ netif_stop_queue(dev);
+
+ /* We must not already have a skb to transmit... */
+ BUG_ON(si->dma_tx.skb);
+
+ return si->tx_start(skb, dev, si);
+}
+
+static int
+sa1100_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
+{
+ struct if_irda_req *rq = (struct if_irda_req *)ifreq;
+ struct sa1100_irda *si = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH:
+ if (capable(CAP_NET_ADMIN)) {
+ /*
+ * We are unable to set the speed if the
+ * device is not running.
+ */
+ if (si->open) {
+ ret = sa1100_irda_set_speed(si,
+ rq->ifr_baudrate);
+ } else {
+ printk("sa1100_irda_ioctl: SIOCSBANDWIDTH: !netif_running\n");
+ ret = 0;
+ }
+ }
+ break;
+
+ case SIOCSMEDIABUSY:
+ ret = -EPERM;
+ if (capable(CAP_NET_ADMIN)) {
+ irda_device_set_media_busy(dev, TRUE);
+ ret = 0;
+ }
+ break;
+
+ case SIOCGRECEIVING:
+ rq->ifr_receiving = IS_FIR(si) ? 0
+ : si->rx_buff.state != OUTSIDE_FRAME;
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int sa1100_irda_startup(struct sa1100_irda *si)
+{
+ int ret;
+
+ /*
+ * Ensure that the ports for this device are setup correctly.
+ */
+ if (si->pdata->startup) {
+ ret = si->pdata->startup(si->dev);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Configure PPC for IRDA - we want to drive TXD2 low.
+ * We also want to drive this pin low during sleep.
+ */
+ PPSR &= ~PPC_TXD2;
+ PSDR &= ~PPC_TXD2;
+ PPDR |= PPC_TXD2;
+
+ /*
+ * Enable HP-SIR modulation, and ensure that the port is disabled.
+ */
+ Ser2UTCR3 = 0;
+ Ser2HSCR0 = HSCR0_UART;
+ Ser2UTCR4 = si->utcr4;
+ Ser2UTCR0 = UTCR0_8BitData;
+ Ser2HSCR2 = HSCR2_TrDataH | HSCR2_RcDataL;
+
+ /*
+ * Clear status register
+ */
+ Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
+
+ ret = sa1100_irda_set_speed(si, si->speed = 9600);
+ if (ret) {
+ Ser2UTCR3 = 0;
+ Ser2HSCR0 = 0;
+
+ if (si->pdata->shutdown)
+ si->pdata->shutdown(si->dev);
+ }
+
+ return ret;
+}
+
+static void sa1100_irda_shutdown(struct sa1100_irda *si)
+{
+ /*
+ * Stop all DMA activity.
+ */
+ dmaengine_terminate_all(si->dma_rx.chan);
+ dmaengine_terminate_all(si->dma_tx.chan);
+
+ /* Disable the port. */
+ Ser2UTCR3 = 0;
+ Ser2HSCR0 = 0;
+
+ if (si->pdata->shutdown)
+ si->pdata->shutdown(si->dev);
+}
+
+static int sa1100_irda_start(struct net_device *dev)
+{
+ struct sa1100_irda *si = netdev_priv(dev);
+ int err;
+
+ si->speed = 9600;
+
+ err = sa1100_irda_dma_request(si->dev, &si->dma_rx, "Ser2ICPRc",
+ &sa1100_irda_fir_rx);
+ if (err)
+ goto err_rx_dma;
+
+ err = sa1100_irda_dma_request(si->dev, &si->dma_tx, "Ser2ICPTr",
+ &sa1100_irda_sir_tx);
+ if (err)
+ goto err_tx_dma;
+
+ /*
+ * Setup the serial port for the specified speed.
+ */
+ err = sa1100_irda_startup(si);
+ if (err)
+ goto err_startup;
+
+ /*
+ * Open a new IrLAP layer instance.
+ */
+ si->irlap = irlap_open(dev, &si->qos, "sa1100");
+ err = -ENOMEM;
+ if (!si->irlap)
+ goto err_irlap;
+
+ err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev);
+ if (err)
+ goto err_irq;
+
+ /*
+ * Now enable the interrupt and start the queue
+ */
+ si->open = 1;
+ sa1100_set_power(si, power_level); /* low power mode */
+
+ netif_start_queue(dev);
+ return 0;
+
+err_irq:
+ irlap_close(si->irlap);
+err_irlap:
+ si->open = 0;
+ sa1100_irda_shutdown(si);
+err_startup:
+ dma_release_channel(si->dma_tx.chan);
+err_tx_dma:
+ dma_release_channel(si->dma_rx.chan);
+err_rx_dma:
+ return err;
+}
+
+static int sa1100_irda_stop(struct net_device *dev)
+{
+ struct sa1100_irda *si = netdev_priv(dev);
+ struct sk_buff *skb;
+
+ netif_stop_queue(dev);
+
+ si->open = 0;
+ sa1100_irda_shutdown(si);
+
+ /*
+ * If we have been doing any DMA activity, make sure we
+ * tidy that up cleanly.
+ */
+ skb = si->dma_rx.skb;
+ if (skb) {
+ dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ si->dma_rx.skb = NULL;
+ }
+
+ skb = si->dma_tx.skb;
+ if (skb) {
+ dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1,
+ DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+ si->dma_tx.skb = NULL;
+ }
+
+ /* Stop IrLAP */
+ if (si->irlap) {
+ irlap_close(si->irlap);
+ si->irlap = NULL;
+ }
+
+ /*
+ * Free resources
+ */
+ dma_release_channel(si->dma_tx.chan);
+ dma_release_channel(si->dma_rx.chan);
+ free_irq(dev->irq, dev);
+
+ sa1100_set_power(si, 0);
+
+ return 0;
+}
+
+static int sa1100_irda_init_iobuf(iobuff_t *io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
+ if (io->head != NULL) {
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+ }
+ return io->head ? 0 : -ENOMEM;
+}
+
+static const struct net_device_ops sa1100_irda_netdev_ops = {
+ .ndo_open = sa1100_irda_start,
+ .ndo_stop = sa1100_irda_stop,
+ .ndo_start_xmit = sa1100_irda_hard_xmit,
+ .ndo_do_ioctl = sa1100_irda_ioctl,
+};
+
+static int sa1100_irda_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct sa1100_irda *si;
+ unsigned int baudrate_mask;
+ int err, irq;
+
+ if (!pdev->dev.platform_data)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return irq < 0 ? irq : -ENXIO;
+
+ err = request_mem_region(__PREG(Ser2UTCR0), 0x24, "IrDA") ? 0 : -EBUSY;
+ if (err)
+ goto err_mem_1;
+ err = request_mem_region(__PREG(Ser2HSCR0), 0x1c, "IrDA") ? 0 : -EBUSY;
+ if (err)
+ goto err_mem_2;
+ err = request_mem_region(__PREG(Ser2HSCR2), 0x04, "IrDA") ? 0 : -EBUSY;
+ if (err)
+ goto err_mem_3;
+
+ dev = alloc_irdadev(sizeof(struct sa1100_irda));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_mem_4;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ si = netdev_priv(dev);
+ si->dev = &pdev->dev;
+ si->pdata = pdev->dev.platform_data;
+
+ sg_init_table(&si->dma_rx.sg, 1);
+ sg_init_table(&si->dma_tx.sg, 1);
+
+ /*
+ * Initialise the HP-SIR buffers
+ */
+ err = sa1100_irda_init_iobuf(&si->rx_buff, 14384);
+ if (err)
+ goto err_mem_5;
+ err = sa1100_irda_init_iobuf(&si->tx_buff, IRDA_SIR_MAX_FRAME);
+ if (err)
+ goto err_mem_5;
+
+ dev->netdev_ops = &sa1100_irda_netdev_ops;
+ dev->irq = irq;
+
+ irda_init_max_qos_capabilies(&si->qos);
+
+ /*
+	 * We support original IrDA up to 115k2, plus 4 Mbps FIR when
+	 * max_rate selects it.  Min Turn Time set to 1ms or greater.
+ */
+ baudrate_mask = IR_9600;
+
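+	/*
+	 * The cases below intentionally fall through, so every rate up to
+	 * and including max_rate ends up in baudrate_mask.
+	 */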
+ switch (max_rate) {
+ case 4000000: baudrate_mask |= IR_4000000 << 8;
+ case 115200: baudrate_mask |= IR_115200;
+ case 57600: baudrate_mask |= IR_57600;
+ case 38400: baudrate_mask |= IR_38400;
+ case 19200: baudrate_mask |= IR_19200;
+ }
+
+ si->qos.baud_rate.bits &= baudrate_mask;
+ si->qos.min_turn_time.bits = 7;
+
+ irda_qos_bits_to_value(&si->qos);
+
+ si->utcr4 = UTCR4_HPSIR;
+ if (tx_lpm)
+ si->utcr4 |= UTCR4_Z1_6us;
+
+ /*
+ * Initially enable HP-SIR modulation, and ensure that the port
+ * is disabled.
+ */
+ Ser2UTCR3 = 0;
+ Ser2UTCR4 = si->utcr4;
+ Ser2HSCR0 = HSCR0_UART;
+
+ err = register_netdev(dev);
+ if (err == 0)
+ platform_set_drvdata(pdev, dev);
+
+ if (err) {
+ err_mem_5:
+ kfree(si->tx_buff.head);
+ kfree(si->rx_buff.head);
+ free_netdev(dev);
+ err_mem_4:
+ release_mem_region(__PREG(Ser2HSCR2), 0x04);
+ err_mem_3:
+ release_mem_region(__PREG(Ser2HSCR0), 0x1c);
+ err_mem_2:
+ release_mem_region(__PREG(Ser2UTCR0), 0x24);
+ }
+ err_mem_1:
+ return err;
+}
+
+static int sa1100_irda_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+
+ if (dev) {
+ struct sa1100_irda *si = netdev_priv(dev);
+ unregister_netdev(dev);
+ kfree(si->tx_buff.head);
+ kfree(si->rx_buff.head);
+ free_netdev(dev);
+ }
+
+ release_mem_region(__PREG(Ser2HSCR2), 0x04);
+ release_mem_region(__PREG(Ser2HSCR0), 0x1c);
+ release_mem_region(__PREG(Ser2UTCR0), 0x24);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * Suspend the IrDA interface.
+ */
+static int sa1100_irda_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct sa1100_irda *si;
+
+ if (!dev)
+ return 0;
+
+ si = netdev_priv(dev);
+ if (si->open) {
+ /*
+ * Stop the transmit queue
+ */
+ netif_device_detach(dev);
+ disable_irq(dev->irq);
+ sa1100_irda_shutdown(si);
+ __sa1100_irda_set_power(si, 0);
+ }
+
+ return 0;
+}
+
+/*
+ * Resume the IrDA interface.
+ */
+static int sa1100_irda_resume(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct sa1100_irda *si;
+
+ if (!dev)
+ return 0;
+
+ si = netdev_priv(dev);
+ if (si->open) {
+ /*
+ * If we missed a speed change, initialise at the new speed
+ * directly. It is debatable whether this is actually
+ * required, but in the interests of continuing from where
+ * we left off it is desirable. The converse argument is
+		 * that we should re-negotiate at 9600 baud.
+ */
+ if (si->newspeed) {
+ si->speed = si->newspeed;
+ si->newspeed = 0;
+ }
+
+ sa1100_irda_startup(si);
+ __sa1100_irda_set_power(si, si->power);
+ enable_irq(dev->irq);
+
+ /*
+ * This automatically wakes up the queue
+ */
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+#else
+#define sa1100_irda_suspend NULL
+#define sa1100_irda_resume NULL
+#endif
+
+static struct platform_driver sa1100ir_driver = {
+ .probe = sa1100_irda_probe,
+ .remove = sa1100_irda_remove,
+ .suspend = sa1100_irda_suspend,
+ .resume = sa1100_irda_resume,
+ .driver = {
+ .name = "sa11x0-ir",
+ },
+};
+
+static int __init sa1100_irda_init(void)
+{
+ /*
+	 * Limit power level to a sensible range.
+ */
+ if (power_level < 1)
+ power_level = 1;
+ if (power_level > 3)
+ power_level = 3;
+
+ return platform_driver_register(&sa1100ir_driver);
+}
+
+static void __exit sa1100_irda_exit(void)
+{
+ platform_driver_unregister(&sa1100ir_driver);
+}
+
+module_init(sa1100_irda_init);
+module_exit(sa1100_irda_exit);
+module_param(power_level, int, 0);
+module_param(tx_lpm, int, 0);
+module_param(max_rate, int, 0);
+
+MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
+MODULE_DESCRIPTION("StrongARM SA1100 IrDA driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM_DESC(power_level, "IrDA power level, 1 (low) to 3 (high)");
+MODULE_PARM_DESC(tx_lpm, "Enable transmitter low power (1.6us) mode");
+MODULE_PARM_DESC(max_rate, "Maximum baud rate (4000000, 115200, 57600, 38400, 19200, 9600)");
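+/*
+ * Illustrative usage (assuming the module is built as sa1100_ir.ko):
+ *   modprobe sa1100_ir max_rate=115200 power_level=3 tx_lpm=0
+ */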
+MODULE_ALIAS("platform:sa11x0-ir");
diff --git a/drivers/staging/irda/drivers/sh_sir.c b/drivers/staging/irda/drivers/sh_sir.c
new file mode 100644
index 000000000000..fede6864c737
--- /dev/null
+++ b/drivers/staging/irda/drivers/sh_sir.c
@@ -0,0 +1,810 @@
+/*
+ * SuperH IrDA Driver
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on bfin_sir.c
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+#include <asm/clock.h>
+
+#define DRIVER_NAME "sh_sir"
+
+#define RX_PHASE (1 << 0)
+#define TX_PHASE (1 << 1)
+#define TX_COMP_PHASE (1 << 2) /* tx complete */
+#define NONE_PHASE (1 << 31)
+
+#define IRIF_RINTCLR 0x0016 /* DMA rx interrupt source clear */
+#define IRIF_TINTCLR 0x0018 /* DMA tx interrupt source clear */
+#define IRIF_SIR0 0x0020 /* IrDA-SIR10 control */
+#define IRIF_SIR1 0x0022 /* IrDA-SIR10 baudrate error correction */
+#define IRIF_SIR2 0x0024 /* IrDA-SIR10 baudrate count */
+#define IRIF_SIR3 0x0026 /* IrDA-SIR10 status */
+#define IRIF_SIR_FRM 0x0028 /* Hardware frame processing set */
+#define IRIF_SIR_EOF 0x002A /* EOF value */
+#define IRIF_SIR_FLG 0x002C /* Flag clear */
+#define IRIF_UART_STS2 0x002E /* UART status 2 */
+#define IRIF_UART0 0x0030 /* UART control */
+#define IRIF_UART1 0x0032 /* UART status */
+#define IRIF_UART2 0x0034 /* UART mode */
+#define IRIF_UART3 0x0036 /* UART transmit data */
+#define IRIF_UART4 0x0038 /* UART receive data */
+#define IRIF_UART5 0x003A /* UART interrupt mask */
+#define IRIF_UART6 0x003C /* UART baud rate error correction */
+#define IRIF_UART7 0x003E /* UART baud rate count set */
+#define IRIF_CRC0 0x0040 /* CRC engine control */
+#define IRIF_CRC1 0x0042 /* CRC engine input data */
+#define IRIF_CRC2 0x0044 /* CRC engine calculation */
+#define IRIF_CRC3 0x0046 /* CRC engine output data 1 */
+#define IRIF_CRC4 0x0048 /* CRC engine output data 2 */
+
+/* IRIF_SIR0 */
+#define IRTPW (1 << 1) /* transmit pulse width select */
+#define IRERRC (1 << 0) /* Clear receive pulse width error */
+
+/* IRIF_SIR3 */
+#define IRERR (1 << 0) /* received pulse width Error */
+
+/* IRIF_SIR_FRM */
+#define EOFD (1 << 9) /* EOF detection flag */
+#define FRER (1 << 8) /* Frame Error bit */
+#define FRP (1 << 0) /* Frame processing set */
+
+/* IRIF_UART_STS2 */
+#define IRSME (1 << 6) /* Receive Sum Error flag */
+#define IROVE (1 << 5) /* Receive Overrun Error flag */
+#define IRFRE (1 << 4) /* Receive Framing Error flag */
+#define IRPRE (1 << 3) /* Receive Parity Error flag */
+
+/* IRIF_UART0_*/
+#define TBEC (1 << 2) /* Transmit Data Clear */
+#define RIE (1 << 1) /* Receive Enable */
+#define TIE (1 << 0) /* Transmit Enable */
+
+/* IRIF_UART1 */
+#define URSME (1 << 6) /* Receive Sum Error Flag */
+#define UROVE (1 << 5) /* Receive Overrun Error Flag */
+#define URFRE (1 << 4) /* Receive Framing Error Flag */
+#define URPRE (1 << 3) /* Receive Parity Error Flag */
+#define RBF (1 << 2) /* Receive Buffer Full Flag */
+#define TSBE (1 << 1) /* Transmit Shift Buffer Empty Flag */
+#define TBE (1 << 0) /* Transmit Buffer Empty flag */
+#define TBCOMP (TSBE | TBE)
+
+/* IRIF_UART5 */
+#define RSEIM (1 << 6) /* Receive Sum Error Flag IRQ Mask */
+#define RBFIM (1 << 2) /* Receive Buffer Full Flag IRQ Mask */
+#define TSBEIM (1 << 1) /* Transmit Shift Buffer Empty Flag IRQ Mask */
+#define TBEIM (1 << 0) /* Transmit Buffer Empty Flag IRQ Mask */
+#define RX_MASK (RSEIM | RBFIM)
+
+/* IRIF_CRC0 */
+#define CRC_RST (1 << 15) /* CRC Engine Reset */
+#define CRC_CT_MASK 0x0FFF
+
+/************************************************************************
+
+
+ structure
+
+
+************************************************************************/
+struct sh_sir_self {
+ void __iomem *membase;
+ unsigned int irq;
+ struct clk *clk;
+
+ struct net_device *ndev;
+
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+};
+
+/************************************************************************
+
+
+ common function
+
+
+************************************************************************/
+static void sh_sir_write(struct sh_sir_self *self, u32 offset, u16 data)
+{
+ iowrite16(data, self->membase + offset);
+}
+
+static u16 sh_sir_read(struct sh_sir_self *self, u32 offset)
+{
+ return ioread16(self->membase + offset);
+}
+
+static void sh_sir_update_bits(struct sh_sir_self *self, u32 offset,
+ u16 mask, u16 data)
+{
+ u16 old, new;
+
+ old = sh_sir_read(self, offset);
+ new = (old & ~mask) | data;
+ if (old != new)
+ sh_sir_write(self, offset, new);
+}
+
+/************************************************************************
+
+
+ CRC function
+
+
+************************************************************************/
+static void sh_sir_crc_reset(struct sh_sir_self *self)
+{
+ sh_sir_write(self, IRIF_CRC0, CRC_RST);
+}
+
+static void sh_sir_crc_add(struct sh_sir_self *self, u8 data)
+{
+ sh_sir_write(self, IRIF_CRC1, (u16)data);
+}
+
+static u16 sh_sir_crc_cnt(struct sh_sir_self *self)
+{
+ return CRC_CT_MASK & sh_sir_read(self, IRIF_CRC0);
+}
+
+static u16 sh_sir_crc_out(struct sh_sir_self *self)
+{
+ return sh_sir_read(self, IRIF_CRC4);
+}
+
+static int sh_sir_crc_init(struct sh_sir_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ int ret = -EIO;
+ u16 val;
+
+ sh_sir_crc_reset(self);
+
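+	/*
+	 * Feed a known test vector: a healthy CRC engine should report a
+	 * count of 4 and a result of 0x51DF (both checked below).
+	 */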
+ sh_sir_crc_add(self, 0xCC);
+ sh_sir_crc_add(self, 0xF5);
+ sh_sir_crc_add(self, 0xF1);
+ sh_sir_crc_add(self, 0xA7);
+
+ val = sh_sir_crc_cnt(self);
+ if (4 != val) {
+ dev_err(dev, "CRC count error %x\n", val);
+ goto crc_init_out;
+ }
+
+ val = sh_sir_crc_out(self);
+ if (0x51DF != val) {
+ dev_err(dev, "CRC result error%x\n", val);
+ goto crc_init_out;
+ }
+
+ ret = 0;
+
+crc_init_out:
+
+ sh_sir_crc_reset(self);
+ return ret;
+}
+
+/************************************************************************
+
+
+ baud rate functions
+
+
+************************************************************************/
+#define SCLK_BASE 1843200 /* 1.8432MHz */
+
+static u32 sh_sir_find_sclk(struct clk *irda_clk)
+{
+ struct cpufreq_frequency_table *freq_table = irda_clk->freq_table;
+ struct cpufreq_frequency_table *pos;
+ struct clk *pclk = clk_get(NULL, "peripheral_clk");
+ u32 limit, min = 0xffffffff, tmp;
+ int index = 0;
+
+ limit = clk_get_rate(pclk);
+ clk_put(pclk);
+
+	/* the IrDA clock cannot be set above peripheral_clk */
+ cpufreq_for_each_valid_entry(pos, freq_table) {
+ u32 freq = pos->frequency;
+
+		/* skip frequencies above peripheral_clk */
+ if (freq > limit)
+ continue;
+
+ tmp = freq % SCLK_BASE;
+ if (tmp < min) {
+ min = tmp;
+ index = pos - freq_table;
+ }
+ }
+
+ return freq_table[index].frequency;
+}
+
+#define ERR_ROUNDING(a) ((a + 5000) / 10000)
+static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
+{
+ struct clk *clk;
+ struct device *dev = &self->ndev->dev;
+ u32 rate;
+ u16 uabca, uabc;
+ u16 irbca, irbc;
+ u32 min, rerr, tmp;
+ int i;
+
+ /* Baud Rate Error Correction x 10000 */
+ u32 rate_err_array[] = {
+ 0, 625, 1250, 1875,
+ 2500, 3125, 3750, 4375,
+ 5000, 5625, 6250, 6875,
+ 7500, 8125, 8750, 9375,
+ };
+
+ /*
+ * FIXME
+ *
+	 * it supports 9600 only for now
+ */
+ switch (baudrate) {
+ case 9600:
+ break;
+ default:
+ dev_err(dev, "un-supported baudrate %d\n", baudrate);
+ return -EIO;
+ }
+
+ clk = clk_get(NULL, "irda_clk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "can not get irda_clk\n");
+ return -EIO;
+ }
+
+ clk_set_rate(clk, sh_sir_find_sclk(clk));
+ rate = clk_get_rate(clk);
+ clk_put(clk);
+
+ dev_dbg(dev, "selected sclk = %d\n", rate);
+
+ /*
+ * CALCULATION
+ *
+ * 1843200 = system rate / (irbca + (irbc + 1))
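+	 *
+	 * e.g. (illustrative): a system rate that is an exact multiple of
+	 * 1843200 Hz needs no fractional correction, so irbca stays 0.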
+ */
+
+ irbc = rate / SCLK_BASE;
+
+ tmp = rate - (SCLK_BASE * irbc);
+ tmp *= 10000;
+
+ rerr = tmp / SCLK_BASE;
+
+ min = 0xffffffff;
+ irbca = 0;
+ for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
+ tmp = abs(rate_err_array[i] - rerr);
+ if (min > tmp) {
+ min = tmp;
+ irbca = i;
+ }
+ }
+
+ tmp = rate / (irbc + ERR_ROUNDING(rate_err_array[irbca]));
+ if ((SCLK_BASE / 100) < abs(tmp - SCLK_BASE))
+ dev_warn(dev, "IrDA freq error margin over %d\n", tmp);
+
+ dev_dbg(dev, "target = %d, result = %d, infrared = %d.%d\n",
+ SCLK_BASE, tmp, irbc, rate_err_array[irbca]);
+
+ irbca = (irbca & 0xF) << 4;
+ irbc = (irbc - 1) & 0xF;
+
+ if (!irbc) {
+ dev_err(dev, "sh_sir can not set 0 in IRIF_SIR2\n");
+ return -EIO;
+ }
+
+ sh_sir_write(self, IRIF_SIR0, IRTPW | IRERRC);
+ sh_sir_write(self, IRIF_SIR1, irbca);
+ sh_sir_write(self, IRIF_SIR2, irbc);
+
+ /*
+ * CALCULATION
+ *
+ * BaudRate[bps] = system rate / (uabca + (uabc + 1) x 16)
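+	 *
+	 * For illustration, assuming a 14.7456 MHz system rate:
+	 * uabc = 95, uabca = 0  ->  14745600 / ((95 + 1) * 16) = 9600 bps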
+ */
+
+ uabc = rate / baudrate;
+ uabc = (uabc / 16) - 1;
+ uabc = (uabc + 1) * 16;
+
+ tmp = rate - (uabc * baudrate);
+ tmp *= 10000;
+
+ rerr = tmp / baudrate;
+
+ min = 0xffffffff;
+ uabca = 0;
+ for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
+ tmp = abs(rate_err_array[i] - rerr);
+ if (min > tmp) {
+ min = tmp;
+ uabca = i;
+ }
+ }
+
+ tmp = rate / (uabc + ERR_ROUNDING(rate_err_array[uabca]));
+ if ((baudrate / 100) < abs(tmp - baudrate))
+ dev_warn(dev, "UART freq error margin over %d\n", tmp);
+
+ dev_dbg(dev, "target = %d, result = %d, uart = %d.%d\n",
+ baudrate, tmp,
+ uabc, rate_err_array[uabca]);
+
+ uabca = (uabca & 0xF) << 4;
+ uabc = (uabc / 16) - 1;
+
+ sh_sir_write(self, IRIF_UART6, uabca);
+ sh_sir_write(self, IRIF_UART7, uabc);
+
+ return 0;
+}
+
+/************************************************************************
+
+
+ iobuf function
+
+
+************************************************************************/
+static int __sh_sir_init_iobuf(iobuff_t *io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL);
+ if (!io->head)
+ return -ENOMEM;
+
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+
+ return 0;
+}
+
+static void sh_sir_remove_iobuf(struct sh_sir_self *self)
+{
+ kfree(self->rx_buff.head);
+ kfree(self->tx_buff.head);
+
+ self->rx_buff.head = NULL;
+ self->tx_buff.head = NULL;
+}
+
+static int sh_sir_init_iobuf(struct sh_sir_self *self, int rxsize, int txsize)
+{
+ int err = -ENOMEM;
+
+ if (self->rx_buff.head ||
+ self->tx_buff.head) {
+ dev_err(&self->ndev->dev, "iobuff has already existed.");
+ return err;
+ }
+
+ err = __sh_sir_init_iobuf(&self->rx_buff, rxsize);
+ if (err)
+ goto iobuf_err;
+
+ err = __sh_sir_init_iobuf(&self->tx_buff, txsize);
+
+iobuf_err:
+ if (err)
+ sh_sir_remove_iobuf(self);
+
+ return err;
+}
+
+/************************************************************************
+
+
+ status function
+
+
+************************************************************************/
+static void sh_sir_clear_all_err(struct sh_sir_self *self)
+{
+ /* Clear error flag for receive pulse width */
+ sh_sir_update_bits(self, IRIF_SIR0, IRERRC, IRERRC);
+
+ /* Clear frame / EOF error flag */
+ sh_sir_write(self, IRIF_SIR_FLG, 0xffff);
+
+ /* Clear all status error */
+ sh_sir_write(self, IRIF_UART_STS2, 0);
+}
+
+static void sh_sir_set_phase(struct sh_sir_self *self, int phase)
+{
+ u16 uart5 = 0;
+ u16 uart0 = 0;
+
+ switch (phase) {
+ case TX_PHASE:
+ uart5 = TBEIM;
+ uart0 = TBEC | TIE;
+ break;
+ case TX_COMP_PHASE:
+ uart5 = TSBEIM;
+ uart0 = TIE;
+ break;
+ case RX_PHASE:
+ uart5 = RX_MASK;
+ uart0 = RIE;
+ break;
+ default:
+ break;
+ }
+
+ sh_sir_write(self, IRIF_UART5, uart5);
+ sh_sir_write(self, IRIF_UART0, uart0);
+}
+
+static int sh_sir_is_which_phase(struct sh_sir_self *self)
+{
+ u16 val = sh_sir_read(self, IRIF_UART5);
+
+ if (val & TBEIM)
+ return TX_PHASE;
+
+ if (val & TSBEIM)
+ return TX_COMP_PHASE;
+
+ if (val & RX_MASK)
+ return RX_PHASE;
+
+ return NONE_PHASE;
+}
+
+static void sh_sir_tx(struct sh_sir_self *self, int phase)
+{
+ switch (phase) {
+ case TX_PHASE:
+ if (0 >= self->tx_buff.len) {
+ sh_sir_set_phase(self, TX_COMP_PHASE);
+ } else {
+ sh_sir_write(self, IRIF_UART3, self->tx_buff.data[0]);
+ self->tx_buff.len--;
+ self->tx_buff.data++;
+ }
+ break;
+ case TX_COMP_PHASE:
+ sh_sir_set_phase(self, RX_PHASE);
+ netif_wake_queue(self->ndev);
+ break;
+ default:
+ dev_err(&self->ndev->dev, "should not happen\n");
+ break;
+ }
+}
+
+static int sh_sir_read_data(struct sh_sir_self *self)
+{
+ u16 val = 0;
+ int timeout = 1024;
+
+ while (timeout--) {
+ val = sh_sir_read(self, IRIF_UART1);
+
+ /* data get */
+ if (val & RBF) {
+ if (val & (URSME | UROVE | URFRE | URPRE))
+ break;
+
+ return (int)sh_sir_read(self, IRIF_UART4);
+ }
+
+ udelay(1);
+ }
+
+ dev_err(&self->ndev->dev, "UART1 %04x : STATUS %04x\n",
+ val, sh_sir_read(self, IRIF_UART_STS2));
+
+ /* read data register for clear error */
+ sh_sir_read(self, IRIF_UART4);
+
+ return -1;
+}
+
+static void sh_sir_rx(struct sh_sir_self *self)
+{
+ int timeout = 1024;
+ int data;
+
+ while (timeout--) {
+ data = sh_sir_read_data(self);
+ if (data < 0)
+ break;
+
+ async_unwrap_char(self->ndev, &self->ndev->stats,
+ &self->rx_buff, (u8)data);
+
+ if (EOFD & sh_sir_read(self, IRIF_SIR_FRM))
+ continue;
+
+ break;
+ }
+}
+
+static irqreturn_t sh_sir_irq(int irq, void *dev_id)
+{
+ struct sh_sir_self *self = dev_id;
+ struct device *dev = &self->ndev->dev;
+ int phase = sh_sir_is_which_phase(self);
+
+ switch (phase) {
+ case TX_COMP_PHASE:
+ case TX_PHASE:
+ sh_sir_tx(self, phase);
+ break;
+ case RX_PHASE:
+ if (sh_sir_read(self, IRIF_SIR3))
+ dev_err(dev, "rcv pulse width error occurred\n");
+
+ sh_sir_rx(self);
+ sh_sir_clear_all_err(self);
+ break;
+ default:
+ dev_err(dev, "unknown interrupt\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+/************************************************************************
+
+
+ net_device_ops function
+
+
+************************************************************************/
+static int sh_sir_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+ int speed = irda_get_next_speed(skb);
+
+ if ((0 < speed) &&
+ (9600 != speed)) {
+ dev_err(&ndev->dev, "support 9600 only (%d)\n", speed);
+ return -EIO;
+ }
+
+ netif_stop_queue(ndev);
+
+ self->tx_buff.data = self->tx_buff.head;
+ self->tx_buff.len = 0;
+ if (skb->len)
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ sh_sir_set_phase(self, TX_PHASE);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int sh_sir_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
+{
+ /*
+ * FIXME
+ *
+	 * This function is required by the IrDA framework,
+	 * but there is nothing to do here yet.
+ */
+ return 0;
+}
+
+static struct net_device_stats *sh_sir_stats(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ return &self->ndev->stats;
+}
+
+static int sh_sir_open(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+ int err;
+
+ clk_enable(self->clk);
+ err = sh_sir_crc_init(self);
+ if (err)
+ goto open_err;
+
+ sh_sir_set_baudrate(self, 9600);
+
+ self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
+ if (!self->irlap) {
+ err = -ENODEV;
+ goto open_err;
+ }
+
+ /*
+ * Now enable the interrupt then start the queue
+ */
+ sh_sir_update_bits(self, IRIF_SIR_FRM, FRP, FRP);
+ sh_sir_read(self, IRIF_UART1); /* flag clear */
+ sh_sir_read(self, IRIF_UART4); /* flag clear */
+ sh_sir_set_phase(self, RX_PHASE);
+
+ netif_start_queue(ndev);
+
+ dev_info(&self->ndev->dev, "opened\n");
+
+ return 0;
+
+open_err:
+ clk_disable(self->clk);
+
+ return err;
+}
+
+static int sh_sir_stop(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ /* Stop IrLAP */
+ if (self->irlap) {
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+ }
+
+ netif_stop_queue(ndev);
+
+ dev_info(&ndev->dev, "stopped\n");
+
+ return 0;
+}
+
+static const struct net_device_ops sh_sir_ndo = {
+ .ndo_open = sh_sir_open,
+ .ndo_stop = sh_sir_stop,
+ .ndo_start_xmit = sh_sir_hard_xmit,
+ .ndo_do_ioctl = sh_sir_ioctl,
+ .ndo_get_stats = sh_sir_stats,
+};
+
+/************************************************************************
+
+
+ platform_driver function
+
+
+************************************************************************/
+static int sh_sir_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ struct sh_sir_self *self;
+ struct resource *res;
+ char clk_name[8];
+ int irq;
+ int err = -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || irq < 0) {
+ dev_err(&pdev->dev, "Not enough platform resources.\n");
+ goto exit;
+ }
+
+ ndev = alloc_irdadev(sizeof(*self));
+ if (!ndev)
+ goto exit;
+
+ self = netdev_priv(ndev);
+ self->membase = ioremap_nocache(res->start, resource_size(res));
+ if (!self->membase) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "Unable to ioremap.\n");
+ goto err_mem_1;
+ }
+
+ err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
+ if (err)
+ goto err_mem_2;
+
+ snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
+ self->clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(self->clk)) {
+ dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ err = -ENODEV;
+ goto err_mem_3;
+ }
+
+ irda_init_max_qos_capabilies(&self->qos);
+
+ ndev->netdev_ops = &sh_sir_ndo;
+ ndev->irq = irq;
+
+ self->ndev = ndev;
+ self->qos.baud_rate.bits &= IR_9600; /* FIXME */
+ self->qos.min_turn_time.bits = 1; /* 10 ms or more */
+
+ irda_qos_bits_to_value(&self->qos);
+
+ err = register_netdev(ndev);
+ if (err)
+ goto err_mem_4;
+
+ platform_set_drvdata(pdev, ndev);
+ err = devm_request_irq(&pdev->dev, irq, sh_sir_irq, 0, "sh_sir", self);
+ if (err) {
+ dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
+ goto err_mem_4;
+ }
+
+ dev_info(&pdev->dev, "SuperH IrDA probed\n");
+
+ goto exit;
+
+err_mem_4:
+ clk_put(self->clk);
+err_mem_3:
+ sh_sir_remove_iobuf(self);
+err_mem_2:
+ iounmap(self->membase);
+err_mem_1:
+ free_netdev(ndev);
+exit:
+ return err;
+}
+
+static int sh_sir_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ if (!self)
+ return 0;
+
+ unregister_netdev(ndev);
+ clk_put(self->clk);
+ sh_sir_remove_iobuf(self);
+ iounmap(self->membase);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static struct platform_driver sh_sir_driver = {
+ .probe = sh_sir_probe,
+ .remove = sh_sir_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(sh_sir_driver);
+
+MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_DESCRIPTION("SuperH IrDA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/irda/drivers/sir-dev.h b/drivers/staging/irda/drivers/sir-dev.h
new file mode 100644
index 000000000000..f50b9c1c0639
--- /dev/null
+++ b/drivers/staging/irda/drivers/sir-dev.h
@@ -0,0 +1,191 @@
+/*********************************************************************
+ *
+ * sir.h: include file for irda-sir device abstraction layer
+ *
+ * Copyright (c) 2002 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#ifndef IRDA_SIR_H
+#define IRDA_SIR_H
+
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h> // iobuff_t
+
+struct sir_fsm {
+ struct semaphore sem;
+ struct delayed_work work;
+ unsigned state, substate;
+ int param;
+ int result;
+};
+
+#define SIRDEV_STATE_WAIT_TX_COMPLETE 0x0100
+
+/* substates for wait_tx_complete */
+#define SIRDEV_STATE_WAIT_XMIT 0x0101
+#define SIRDEV_STATE_WAIT_UNTIL_SENT 0x0102
+#define SIRDEV_STATE_TX_DONE 0x0103
+
+#define SIRDEV_STATE_DONGLE_OPEN 0x0300
+
+/* 0x0301-0x03ff reserved for individual dongle substates */
+
+#define SIRDEV_STATE_DONGLE_CLOSE 0x0400
+
+/* 0x0401-0x04ff reserved for individual dongle substates */
+
+#define SIRDEV_STATE_SET_DTR_RTS 0x0500
+
+#define SIRDEV_STATE_SET_SPEED 0x0700
+#define SIRDEV_STATE_DONGLE_CHECK 0x0800
+#define SIRDEV_STATE_DONGLE_RESET 0x0900
+
+/* 0x0901-0x09ff reserved for individual dongle substates */
+
+#define SIRDEV_STATE_DONGLE_SPEED 0x0a00
+/* 0x0a01-0x0aff reserved for individual dongle substates */
+
+#define SIRDEV_STATE_PORT_SPEED 0x0b00
+#define SIRDEV_STATE_DONE 0x0c00
+#define SIRDEV_STATE_ERROR 0x0d00
+#define SIRDEV_STATE_COMPLETE 0x0e00
+
+#define SIRDEV_STATE_DEAD 0xffff
+
+
+struct sir_dev;
+
+struct dongle_driver {
+
+ struct module *owner;
+
+ const char *driver_name;
+
+ IRDA_DONGLE type;
+
+ int (*open)(struct sir_dev *dev);
+ int (*close)(struct sir_dev *dev);
+ int (*reset)(struct sir_dev *dev);
+ int (*set_speed)(struct sir_dev *dev, unsigned speed);
+
+ struct list_head dongle_list;
+};
+
+struct sir_driver {
+
+ struct module *owner;
+
+ const char *driver_name;
+
+ int qos_mtt_bits;
+
+ int (*chars_in_buffer)(struct sir_dev *dev);
+ void (*wait_until_sent)(struct sir_dev *dev);
+ int (*set_speed)(struct sir_dev *dev, unsigned speed);
+ int (*set_dtr_rts)(struct sir_dev *dev, int dtr, int rts);
+
+ int (*do_write)(struct sir_dev *dev, const unsigned char *ptr, size_t len);
+
+ int (*start_dev)(struct sir_dev *dev);
+ int (*stop_dev)(struct sir_dev *dev);
+};
+
+
+/* exported */
+
+int irda_register_dongle(struct dongle_driver *new);
+int irda_unregister_dongle(struct dongle_driver *drv);
+
+struct sir_dev *sirdev_get_instance(const struct sir_driver *drv,
+ const char *name);
+int sirdev_put_instance(struct sir_dev *self);
+
+int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
+void sirdev_write_complete(struct sir_dev *dev);
+int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
+
+/* low level helpers for SIR device/dongle setup */
+int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
+int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
+int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
+
+/* not exported */
+
+int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
+int sirdev_put_dongle(struct sir_dev *self);
+
+void sirdev_enable_rx(struct sir_dev *dev);
+int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
+
+/* inline helpers */
+
+static inline int sirdev_schedule_speed(struct sir_dev *dev, unsigned speed)
+{
+ return sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, speed);
+}
+
+static inline int sirdev_schedule_dongle_open(struct sir_dev *dev, int dongle_id)
+{
+ return sirdev_schedule_request(dev, SIRDEV_STATE_DONGLE_OPEN, dongle_id);
+}
+
+static inline int sirdev_schedule_dongle_close(struct sir_dev *dev)
+{
+ return sirdev_schedule_request(dev, SIRDEV_STATE_DONGLE_CLOSE, 0);
+}
+
+static inline int sirdev_schedule_dtr_rts(struct sir_dev *dev, int dtr, int rts)
+{
+ int dtrrts;
+
+ dtrrts = ((dtr) ? 0x02 : 0x00) | ((rts) ? 0x01 : 0x00);
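+	/*
+	 * bit 1 = DTR, bit 0 = RTS; decoded again in the SET_DTR_RTS
+	 * state of the config fsm.
+	 */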
+ return sirdev_schedule_request(dev, SIRDEV_STATE_SET_DTR_RTS, dtrrts);
+}
+
+#if 0
+static inline int sirdev_schedule_mode(struct sir_dev *dev, int mode)
+{
+ return sirdev_schedule_request(dev, SIRDEV_STATE_SET_MODE, mode);
+}
+#endif
+
+
+struct sir_dev {
+ struct net_device *netdev;
+
+ struct irlap_cb *irlap;
+
+ struct qos_info qos;
+
+ char hwname[32];
+
+ struct sir_fsm fsm;
+ atomic_t enable_rx;
+ int raw_tx;
+ spinlock_t tx_lock;
+
+ u32 new_speed;
+ u32 flags;
+
+ unsigned speed;
+
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+ struct sk_buff *tx_skb;
+
+ const struct dongle_driver * dongle_drv;
+ const struct sir_driver * drv;
+ void *priv;
+
+};
+
+#endif /* IRDA_SIR_H */
diff --git a/drivers/staging/irda/drivers/sir_dev.c b/drivers/staging/irda/drivers/sir_dev.c
new file mode 100644
index 000000000000..6af26a7d787c
--- /dev/null
+++ b/drivers/staging/irda/drivers/sir_dev.c
@@ -0,0 +1,987 @@
+/*********************************************************************
+ *
+ * sir_dev.c: irda sir network device
+ *
+ * Copyright (c) 2002 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+
+#include "sir-dev.h"
+
+
+static struct workqueue_struct *irda_sir_wq;
+
+/* STATE MACHINE */
+
+/* substate handler of the config-fsm to handle the cases where we want
+ * to wait for transmit completion before changing the port configuration
+ */
+
+static int sirdev_tx_complete_fsm(struct sir_dev *dev)
+{
+ struct sir_fsm *fsm = &dev->fsm;
+ unsigned next_state, delay;
+ unsigned bytes_left;
+
+ do {
+ next_state = fsm->substate; /* default: stay in current substate */
+ delay = 0;
+
+ switch(fsm->substate) {
+
+ case SIRDEV_STATE_WAIT_XMIT:
+ if (dev->drv->chars_in_buffer)
+ bytes_left = dev->drv->chars_in_buffer(dev);
+ else
+ bytes_left = 0;
+ if (!bytes_left) {
+ next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
+ break;
+ }
+
+ if (dev->speed > 115200)
+ delay = (bytes_left*8*10000) / (dev->speed/100);
+ else if (dev->speed > 0)
+ delay = (bytes_left*10*10000) / (dev->speed/100);
+ else
+ delay = 0;
+ /* expected delay (usec) until remaining bytes are sent */
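+			/*
+			 * (8 bit times per byte above 115200 baud, 10 with async
+			 *  start/stop framing below it; e.g. at 115200 bps this is
+			 *  roughly 87 usec per remaining byte)
+			 */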
+ if (delay < 100) {
+ udelay(delay);
+ delay = 0;
+ break;
+ }
+ /* sleep some longer delay (msec) */
+ delay = (delay+999) / 1000;
+ break;
+
+ case SIRDEV_STATE_WAIT_UNTIL_SENT:
+			/* block until the underlying hardware buffers are empty */
+ if (dev->drv->wait_until_sent)
+ dev->drv->wait_until_sent(dev);
+ next_state = SIRDEV_STATE_TX_DONE;
+ break;
+
+ case SIRDEV_STATE_TX_DONE:
+ return 0;
+
+ default:
+ net_err_ratelimited("%s - undefined state\n", __func__);
+ return -EINVAL;
+ }
+ fsm->substate = next_state;
+ } while (delay == 0);
+ return delay;
+}
+
+/*
+ * Function sirdev_config_fsm
+ *
+ * State machine to handle the configuration of the device (and attached dongle, if any).
+ * This handler is scheduled for execution in kIrDAd context, so we can sleep.
+ * However, kIrDAd is shared by all sir_dev devices, so we had better not sleep
+ * there too long. Instead, for longer delays we start a timer to reschedule us later.
+ * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
+ * Both must be unlocked/restarted on completion - but only on final exit.
+ */
+
+static void sirdev_config_fsm(struct work_struct *work)
+{
+ struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
+ struct sir_fsm *fsm = &dev->fsm;
+ int next_state;
+ int ret = -1;
+ unsigned delay;
+
+ pr_debug("%s(), <%ld>\n", __func__, jiffies);
+
+ do {
+ pr_debug("%s - state=0x%04x / substate=0x%04x\n",
+ __func__, fsm->state, fsm->substate);
+
+ next_state = fsm->state;
+ delay = 0;
+
+ switch(fsm->state) {
+
+ case SIRDEV_STATE_DONGLE_OPEN:
+ if (dev->dongle_drv != NULL) {
+ ret = sirdev_put_dongle(dev);
+ if (ret) {
+ fsm->result = -EINVAL;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ }
+
+ /* Initialize dongle */
+ ret = sirdev_get_dongle(dev, fsm->param);
+ if (ret) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+
+ /* Dongles are powered through the modem control lines which
+ * were just set during open. Before resetting, let's wait for
+ * the power to stabilize. This is what some dongle drivers did
+ * in open before, while others didn't - should be safe anyway.
+ */
+
+ delay = 50;
+ fsm->substate = SIRDEV_STATE_DONGLE_RESET;
+ next_state = SIRDEV_STATE_DONGLE_RESET;
+
+ fsm->param = 9600;
+
+ break;
+
+ case SIRDEV_STATE_DONGLE_CLOSE:
+			/* shouldn't we just treat this as success? */
+ if (dev->dongle_drv == NULL) {
+ fsm->result = -EINVAL;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+
+ ret = sirdev_put_dongle(dev);
+ if (ret) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ next_state = SIRDEV_STATE_DONE;
+ break;
+
+ case SIRDEV_STATE_SET_DTR_RTS:
+ ret = sirdev_set_dtr_rts(dev,
+ (fsm->param&0x02) ? TRUE : FALSE,
+ (fsm->param&0x01) ? TRUE : FALSE);
+ next_state = SIRDEV_STATE_DONE;
+ break;
+
+ case SIRDEV_STATE_SET_SPEED:
+ fsm->substate = SIRDEV_STATE_WAIT_XMIT;
+ next_state = SIRDEV_STATE_DONGLE_CHECK;
+ break;
+
+ case SIRDEV_STATE_DONGLE_CHECK:
+ ret = sirdev_tx_complete_fsm(dev);
+ if (ret < 0) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ if ((delay=ret) != 0)
+ break;
+
+ if (dev->dongle_drv) {
+ fsm->substate = SIRDEV_STATE_DONGLE_RESET;
+ next_state = SIRDEV_STATE_DONGLE_RESET;
+ }
+ else {
+ dev->speed = fsm->param;
+ next_state = SIRDEV_STATE_PORT_SPEED;
+ }
+ break;
+
+ case SIRDEV_STATE_DONGLE_RESET:
+ if (dev->dongle_drv->reset) {
+ ret = dev->dongle_drv->reset(dev);
+ if (ret < 0) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ }
+ else
+ ret = 0;
+ if ((delay=ret) == 0) {
+ /* set serial port according to dongle default speed */
+ if (dev->drv->set_speed)
+ dev->drv->set_speed(dev, dev->speed);
+ fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
+ next_state = SIRDEV_STATE_DONGLE_SPEED;
+ }
+ break;
+
+ case SIRDEV_STATE_DONGLE_SPEED:
+ if (dev->dongle_drv->set_speed) {
+ ret = dev->dongle_drv->set_speed(dev, fsm->param);
+ if (ret < 0) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ }
+ else
+ ret = 0;
+ if ((delay=ret) == 0)
+ next_state = SIRDEV_STATE_PORT_SPEED;
+ break;
+
+ case SIRDEV_STATE_PORT_SPEED:
+ /* Finally we are ready to change the serial port speed */
+ if (dev->drv->set_speed)
+ dev->drv->set_speed(dev, dev->speed);
+ dev->new_speed = 0;
+ next_state = SIRDEV_STATE_DONE;
+ break;
+
+ case SIRDEV_STATE_DONE:
+ /* Signal network layer so it can send more frames */
+ netif_wake_queue(dev->netdev);
+ next_state = SIRDEV_STATE_COMPLETE;
+ break;
+
+ default:
+ net_err_ratelimited("%s - undefined state\n", __func__);
+ fsm->result = -EINVAL;
+ /* fall thru */
+
+ case SIRDEV_STATE_ERROR:
+ net_err_ratelimited("%s - error: %d\n",
+ __func__, fsm->result);
+
+#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
+ netif_stop_queue(dev->netdev);
+#else
+ netif_wake_queue(dev->netdev);
+#endif
+ /* fall thru */
+
+ case SIRDEV_STATE_COMPLETE:
+ /* config change finished, so we are not busy any longer */
+ sirdev_enable_rx(dev);
+ up(&fsm->sem);
+ return;
+ }
+ fsm->state = next_state;
+ } while(!delay);
+
+ queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
+}
+
+/* schedule some device configuration task for execution by kIrDAd
+ * on behalf of the above state machine.
+ * can be called from process or interrupt/tasklet context.
+ */
+
+int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
+{
+ struct sir_fsm *fsm = &dev->fsm;
+
+ pr_debug("%s - state=0x%04x / param=%u\n", __func__,
+ initial_state, param);
+
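+	/* grab the fsm semaphore; in atomic context we must not sleep,
+	 * so report the state machine as busy instead of blocking
+	 */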
+ if (down_trylock(&fsm->sem)) {
+ if (in_interrupt() || in_atomic() || irqs_disabled()) {
+ pr_debug("%s(), state machine busy!\n", __func__);
+ return -EWOULDBLOCK;
+ } else
+ down(&fsm->sem);
+ }
+
+ if (fsm->state == SIRDEV_STATE_DEAD) {
+ /* race with sirdev_close should never happen */
+		net_err_ratelimited("%s(), instance is stale!\n", __func__);
+ up(&fsm->sem);
+ return -ESTALE; /* or better EPIPE? */
+ }
+
+ netif_stop_queue(dev->netdev);
+ atomic_set(&dev->enable_rx, 0);
+
+ fsm->state = initial_state;
+ fsm->param = param;
+ fsm->result = 0;
+
+ INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
+ queue_delayed_work(irda_sir_wq, &fsm->work, 0);
+ return 0;
+}
+
+
+/***************************************************************************/
+
+void sirdev_enable_rx(struct sir_dev *dev)
+{
+ if (unlikely(atomic_read(&dev->enable_rx)))
+ return;
+
+	/* flush rx-buffer - should also help in case of problems with echo cancellation */
+ dev->rx_buff.data = dev->rx_buff.head;
+ dev->rx_buff.len = 0;
+ dev->rx_buff.in_frame = FALSE;
+ dev->rx_buff.state = OUTSIDE_FRAME;
+ atomic_set(&dev->enable_rx, 1);
+}
+
+static int sirdev_is_receiving(struct sir_dev *dev)
+{
+ if (!atomic_read(&dev->enable_rx))
+ return 0;
+
+ return dev->rx_buff.state != OUTSIDE_FRAME;
+}
+
+int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
+{
+ int err;
+
+ pr_debug("%s : requesting dongle %d.\n", __func__, type);
+
+ err = sirdev_schedule_dongle_open(dev, type);
+ if (unlikely(err))
+ return err;
+ down(&dev->fsm.sem); /* block until config change completed */
+ err = dev->fsm.result;
+ up(&dev->fsm.sem);
+ return err;
+}
+EXPORT_SYMBOL(sirdev_set_dongle);
+
+/* used by dongle drivers for dongle programming */
+
+int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
+{
+ unsigned long flags;
+ int ret;
+
+ if (unlikely(len > dev->tx_buff.truesize))
+ return -ENOSPC;
+
+ spin_lock_irqsave(&dev->tx_lock, flags); /* serialize with other tx operations */
+ while (dev->tx_buff.len > 0) { /* wait until tx idle */
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&dev->tx_lock, flags);
+ }
+
+ dev->tx_buff.data = dev->tx_buff.head;
+ memcpy(dev->tx_buff.data, buf, len);
+ dev->tx_buff.len = len;
+
+ ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
+ if (ret > 0) {
+ pr_debug("%s(), raw-tx started\n", __func__);
+
+ dev->tx_buff.data += ret;
+ dev->tx_buff.len -= ret;
+ dev->raw_tx = 1;
+ ret = len; /* all data is going to be sent */
+ }
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(sirdev_raw_write);
+
+/* seems some dongle drivers may need this */
+
+int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
+{
+ int count;
+
+ if (atomic_read(&dev->enable_rx))
+ return -EIO; /* fail if we expect irda-frames */
+
+ count = (len < dev->rx_buff.len) ? len : dev->rx_buff.len;
+
+ if (count > 0) {
+ memcpy(buf, dev->rx_buff.data, count);
+ dev->rx_buff.data += count;
+ dev->rx_buff.len -= count;
+ }
+
+ /* remaining stuff gets flushed when re-enabling normal rx */
+
+ return count;
+}
+EXPORT_SYMBOL(sirdev_raw_read);
+
+int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
+{
+ int ret = -ENXIO;
+ if (dev->drv->set_dtr_rts)
+ ret = dev->drv->set_dtr_rts(dev, dtr, rts);
+ return ret;
+}
+EXPORT_SYMBOL(sirdev_set_dtr_rts);
+
+/**********************************************************************/
+
+/* called from client driver - likely with bh-context - to indicate
+ * it made some progress with transmission. Hence we send the next
+ * chunk, if any, or complete the skb otherwise
+ */
+
+void sirdev_write_complete(struct sir_dev *dev)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+ int actual = 0;
+ int err;
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ pr_debug("%s() - dev->tx_buff.len = %d\n",
+ __func__, dev->tx_buff.len);
+
+ if (likely(dev->tx_buff.len > 0)) {
+ /* Write data left in transmit buffer */
+ actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
+
+ if (likely(actual>0)) {
+ dev->tx_buff.data += actual;
+ dev->tx_buff.len -= actual;
+ }
+ else if (unlikely(actual<0)) {
+ /* could be dropped later when we have tx_timeout to recover */
+ net_err_ratelimited("%s: drv->do_write failed (%d)\n",
+ __func__, actual);
+ if ((skb=dev->tx_skb) != NULL) {
+ dev->tx_skb = NULL;
+ dev_kfree_skb_any(skb);
+ dev->netdev->stats.tx_errors++;
+ dev->netdev->stats.tx_dropped++;
+ }
+ dev->tx_buff.len = 0;
+ }
+ if (dev->tx_buff.len > 0)
+ goto done; /* more data to send later */
+ }
+
+ if (unlikely(dev->raw_tx != 0)) {
+ /* in raw mode we are just done now after the buffer was sent
+ * completely. Since this was requested by some dongle driver
+ * running under the control of the irda-thread we must take
+ * care here not to re-enable the queue. The queue will be
+ * restarted when the irda-thread has completed the request.
+ */
+
+ pr_debug("%s(), raw-tx done\n", __func__);
+ dev->raw_tx = 0;
+ goto done; /* no post-frame handling in raw mode */
+ }
+
+	/* We have now finished sending this skb.
+	 * Update statistics and free the skb.
+	 * Finally we check for and trigger a pending speed change, if any;
+	 * if not, we switch to rx mode and wake the queue for further
+	 * packets.
+	 * Note that the scheduled speed request blocks until the lower
+	 * client driver and the corresponding hardware have really
+	 * finished sending all data (e.g. xmit fifo drained)
+	 * before the speed change is finally performed and the queue
+	 * re-activated.
+	 */
+
+ pr_debug("%s(), finished with frame!\n", __func__);
+
+ if ((skb=dev->tx_skb) != NULL) {
+ dev->tx_skb = NULL;
+ dev->netdev->stats.tx_packets++;
+ dev->netdev->stats.tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
+ }
+
+ if (unlikely(dev->new_speed > 0)) {
+ pr_debug("%s(), Changing speed!\n", __func__);
+ err = sirdev_schedule_speed(dev, dev->new_speed);
+ if (unlikely(err)) {
+ /* should never happen
+ * forget the speed change and hope the stack recovers
+ */
+ net_err_ratelimited("%s - schedule speed change failed: %d\n",
+ __func__, err);
+ netif_wake_queue(dev->netdev);
+ }
+ /* else: success
+ * speed change in progress now
+ * on completion dev->new_speed gets cleared,
+ * rx-reenabled and the queue restarted
+ */
+ }
+ else {
+ sirdev_enable_rx(dev);
+ netif_wake_queue(dev->netdev);
+ }
+
+done:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+EXPORT_SYMBOL(sirdev_write_complete);
+
+/* called from client driver - likely with bh-context - to give us
+ * some more received bytes. We put them into the rx-buffer,
+ * normally unwrapping and building LAP-skb's (unless rx disabled)
+ */
+
+int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
+{
+ if (!dev || !dev->netdev) {
+ net_warn_ratelimited("%s(), not ready yet!\n", __func__);
+ return -1;
+ }
+
+ if (!dev->irlap) {
+ net_warn_ratelimited("%s - too early: %p / %zd!\n",
+ __func__, cp, count);
+ return -1;
+ }
+
+ if (cp==NULL) {
+ /* error already at lower level receive
+ * just update stats and set media busy
+ */
+ irda_device_set_media_busy(dev->netdev, TRUE);
+ dev->netdev->stats.rx_dropped++;
+ pr_debug("%s; rx-drop: %zd\n", __func__, count);
+ return 0;
+ }
+
+ /* Read the characters into the buffer */
+ if (likely(atomic_read(&dev->enable_rx))) {
+ while (count--)
+ /* Unwrap and destuff one byte */
+ async_unwrap_char(dev->netdev, &dev->netdev->stats,
+ &dev->rx_buff, *cp++);
+ } else {
+ while (count--) {
+ /* rx not enabled: save the raw bytes and never
+ * trigger any netif_rx. The received bytes are flushed
+ * later when we re-enable rx but might be read meanwhile
+ * by the dongle driver.
+ */
+ dev->rx_buff.data[dev->rx_buff.len++] = *cp++;
+
+ /* What should we do when the buffer is full? */
+ if (unlikely(dev->rx_buff.len == dev->rx_buff.truesize))
+ dev->rx_buff.len = 0;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sirdev_receive);
+
+/**********************************************************************/
+
+/* callbacks from network layer */
+
+static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct sir_dev *dev = netdev_priv(ndev);
+ unsigned long flags;
+ int actual = 0;
+ int err;
+ s32 speed;
+
+ IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
+
+ netif_stop_queue(ndev);
+
+ pr_debug("%s(), skb->len = %d\n", __func__, skb->len);
+
+ speed = irda_get_next_speed(skb);
+ if ((speed != dev->speed) && (speed != -1)) {
+ if (!skb->len) {
+ err = sirdev_schedule_speed(dev, speed);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ /* Failed to initiate the speed change, likely the fsm
+ * is still busy (pretty unlikely, but...)
+ * We refuse to accept the skb and return with the queue
+ * stopped so the network layer will retry after the
+ * fsm completes and wakes the queue.
+ */
+ return NETDEV_TX_BUSY;
+ }
+ else if (unlikely(err)) {
+ /* other fatal error - forget the speed change and
+ * hope the stack will recover somehow
+ */
+ netif_start_queue(ndev);
+ }
+ /* else: success
+ * speed change in progress now
+ * on completion the queue gets restarted
+ */
+
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ } else
+ dev->new_speed = speed;
+ }
+
+ /* Init tx buffer*/
+ dev->tx_buff.data = dev->tx_buff.head;
+
+ /* Check problems */
+ if(spin_is_locked(&dev->tx_lock)) {
+ pr_debug("%s(), write not completed\n", __func__);
+ }
+
+ /* serialize with write completion */
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
+ dev->tx_buff.len = async_wrap_skb(skb, dev->tx_buff.data, dev->tx_buff.truesize);
+
+ /* transmission will start now - disable receive.
+ * if we are just in the middle of an incoming frame,
+	 * treat it as a collision. It is probably a good idea to
+	 * reset the rx_buff to OUTSIDE_FRAME in this case too?
+ */
+ atomic_set(&dev->enable_rx, 0);
+ if (unlikely(sirdev_is_receiving(dev)))
+ dev->netdev->stats.collisions++;
+
+ actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
+
+ if (likely(actual > 0)) {
+ dev->tx_skb = skb;
+ dev->tx_buff.data += actual;
+ dev->tx_buff.len -= actual;
+ }
+ else if (unlikely(actual < 0)) {
+ /* could be dropped later when we have tx_timeout to recover */
+ net_err_ratelimited("%s: drv->do_write failed (%d)\n",
+ __func__, actual);
+ dev_kfree_skb_any(skb);
+ dev->netdev->stats.tx_errors++;
+ dev->netdev->stats.tx_dropped++;
+ netif_wake_queue(ndev);
+ }
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* called from network layer with rtnl hold */
+
+static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct sir_dev *dev = netdev_priv(ndev);
+ int ret = 0;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else
+ ret = sirdev_schedule_speed(dev, irq->ifr_baudrate);
+ /* cannot sleep here for completion
+ * we are called from network layer with rtnl hold
+ */
+ break;
+
+ case SIOCSDONGLE: /* Set dongle */
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else
+ ret = sirdev_schedule_dongle_open(dev, irq->ifr_dongle);
+ /* cannot sleep here for completion
+ * we are called from network layer with rtnl hold
+ */
+ break;
+
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else
+ irda_device_set_media_busy(dev->netdev, TRUE);
+ break;
+
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = sirdev_is_receiving(dev);
+ break;
+
+ case SIOCSDTRRTS:
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else
+ ret = sirdev_schedule_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
+ /* cannot sleep here for completion
+ * we are called from network layer with rtnl hold
+ */
+ break;
+
+ case SIOCSMODE:
+#if 0
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else
+ ret = sirdev_schedule_mode(dev, irq->ifr_mode);
+ /* cannot sleep here for completion
+ * we are called from network layer with rtnl hold
+ */
+ break;
+#endif
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+/* ----------------------------------------------------------------------------- */
+
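+/* presumably 2052 payload bytes (2048 data + address + control + 2 FCS), each
+ * escaped to two bytes in the worst case, plus BOF, EOF and up to 163 extra
+ * XBOFs: 2*2052 + 2 + 163 = 4269
+ */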
+#define SIRBUF_ALLOCSIZE 4269 /* worst case size of a wrapped IrLAP frame */
+
+static int sirdev_alloc_buffers(struct sir_dev *dev)
+{
+ dev->tx_buff.truesize = SIRBUF_ALLOCSIZE;
+ dev->rx_buff.truesize = IRDA_SKB_MAX_MTU;
+
+ /* Bootstrap ZeroCopy Rx */
+ dev->rx_buff.skb = __netdev_alloc_skb(dev->netdev, dev->rx_buff.truesize,
+ GFP_KERNEL);
+ if (dev->rx_buff.skb == NULL)
+ return -ENOMEM;
+ skb_reserve(dev->rx_buff.skb, 1);
+ dev->rx_buff.head = dev->rx_buff.skb->data;
+
+ dev->tx_buff.head = kmalloc(dev->tx_buff.truesize, GFP_KERNEL);
+ if (dev->tx_buff.head == NULL) {
+ kfree_skb(dev->rx_buff.skb);
+ dev->rx_buff.skb = NULL;
+ dev->rx_buff.head = NULL;
+ return -ENOMEM;
+ }
+
+ dev->tx_buff.data = dev->tx_buff.head;
+ dev->rx_buff.data = dev->rx_buff.head;
+ dev->tx_buff.len = 0;
+ dev->rx_buff.len = 0;
+
+ dev->rx_buff.in_frame = FALSE;
+ dev->rx_buff.state = OUTSIDE_FRAME;
+ return 0;
+}
+
+static void sirdev_free_buffers(struct sir_dev *dev)
+{
+ kfree_skb(dev->rx_buff.skb);
+ kfree(dev->tx_buff.head);
+ dev->rx_buff.head = dev->tx_buff.head = NULL;
+ dev->rx_buff.skb = NULL;
+}
+
+static int sirdev_open(struct net_device *ndev)
+{
+ struct sir_dev *dev = netdev_priv(ndev);
+ const struct sir_driver *drv = dev->drv;
+
+ if (!drv)
+ return -ENODEV;
+
+ /* increase the reference count of the driver module before doing serious stuff */
+ if (!try_module_get(drv->owner))
+ return -ESTALE;
+
+ if (sirdev_alloc_buffers(dev))
+ goto errout_dec;
+
+ if (!dev->drv->start_dev || dev->drv->start_dev(dev))
+ goto errout_free;
+
+ sirdev_enable_rx(dev);
+ dev->raw_tx = 0;
+
+ netif_start_queue(ndev);
+ dev->irlap = irlap_open(ndev, &dev->qos, dev->hwname);
+ if (!dev->irlap)
+ goto errout_stop;
+
+ netif_wake_queue(ndev);
+
+ pr_debug("%s - done, speed = %d\n", __func__, dev->speed);
+
+ return 0;
+
+errout_stop:
+ atomic_set(&dev->enable_rx, 0);
+ if (dev->drv->stop_dev)
+ dev->drv->stop_dev(dev);
+errout_free:
+ sirdev_free_buffers(dev);
+errout_dec:
+ module_put(drv->owner);
+ return -EAGAIN;
+}
+
+static int sirdev_close(struct net_device *ndev)
+{
+ struct sir_dev *dev = netdev_priv(ndev);
+ const struct sir_driver *drv;
+
+/* pr_debug("%s\n", __func__); */
+
+ netif_stop_queue(ndev);
+
+ down(&dev->fsm.sem); /* block on pending config completion */
+
+ atomic_set(&dev->enable_rx, 0);
+
+ if (unlikely(!dev->irlap))
+ goto out;
+ irlap_close(dev->irlap);
+ dev->irlap = NULL;
+
+ drv = dev->drv;
+ if (unlikely(!drv || !dev->priv))
+ goto out;
+
+ if (drv->stop_dev)
+ drv->stop_dev(dev);
+
+ sirdev_free_buffers(dev);
+ module_put(drv->owner);
+
+out:
+ dev->speed = 0;
+ up(&dev->fsm.sem);
+ return 0;
+}
+
+static const struct net_device_ops sirdev_ops = {
+ .ndo_start_xmit = sirdev_hard_xmit,
+ .ndo_open = sirdev_open,
+ .ndo_stop = sirdev_close,
+ .ndo_do_ioctl = sirdev_ioctl,
+};
+/* ----------------------------------------------------------------------------- */
+
+struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name)
+{
+ struct net_device *ndev;
+ struct sir_dev *dev;
+
+ pr_debug("%s - %s\n", __func__, name);
+
+	/* Instead of adding tests to protect against drv->do_write == NULL
+	 * in several places, we refuse to create a sir_dev instance for
+	 * drivers which don't implement do_write.
+	 */
+ if (!drv || !drv->do_write)
+ return NULL;
+
+ /*
+ * Allocate new instance of the device
+ */
+ ndev = alloc_irdadev(sizeof(*dev));
+ if (ndev == NULL) {
+ net_err_ratelimited("%s - Can't allocate memory for IrDA control block!\n",
+ __func__);
+ goto out;
+ }
+ dev = netdev_priv(ndev);
+
+ irda_init_max_qos_capabilies(&dev->qos);
+ dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ dev->qos.min_turn_time.bits = drv->qos_mtt_bits;
+ irda_qos_bits_to_value(&dev->qos);
+
+ strncpy(dev->hwname, name, sizeof(dev->hwname)-1);
+
+ atomic_set(&dev->enable_rx, 0);
+ dev->tx_skb = NULL;
+
+ spin_lock_init(&dev->tx_lock);
+ sema_init(&dev->fsm.sem, 1);
+
+ dev->drv = drv;
+ dev->netdev = ndev;
+
+ /* Override the network functions we need to use */
+ ndev->netdev_ops = &sirdev_ops;
+
+ if (register_netdev(ndev)) {
+ net_err_ratelimited("%s(), register_netdev() failed!\n",
+ __func__);
+ goto out_freenetdev;
+ }
+
+ return dev;
+
+out_freenetdev:
+ free_netdev(ndev);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL(sirdev_get_instance);
+
+int sirdev_put_instance(struct sir_dev *dev)
+{
+ int err = 0;
+
+ pr_debug("%s\n", __func__);
+
+ atomic_set(&dev->enable_rx, 0);
+
+ netif_carrier_off(dev->netdev);
+ netif_device_detach(dev->netdev);
+
+ if (dev->dongle_drv)
+ err = sirdev_schedule_dongle_close(dev);
+ if (err)
+ net_err_ratelimited("%s - error %d\n", __func__, err);
+
+ sirdev_close(dev->netdev);
+
+ down(&dev->fsm.sem);
+	dev->fsm.state = SIRDEV_STATE_DEAD;	/* mark as stale */
+ dev->dongle_drv = NULL;
+ dev->priv = NULL;
+ up(&dev->fsm.sem);
+
+ /* Remove netdevice */
+ unregister_netdev(dev->netdev);
+
+ free_netdev(dev->netdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(sirdev_put_instance);
+
+static int __init sir_wq_init(void)
+{
+ irda_sir_wq = create_singlethread_workqueue("irda_sir_wq");
+ if (!irda_sir_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+static void __exit sir_wq_exit(void)
+{
+ destroy_workqueue(irda_sir_wq);
+}
+
+module_init(sir_wq_init);
+module_exit(sir_wq_exit);
+
+MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
+MODULE_DESCRIPTION("IrDA SIR core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/irda/drivers/sir_dongle.c b/drivers/staging/irda/drivers/sir_dongle.c
new file mode 100644
index 000000000000..7436f73ff1bb
--- /dev/null
+++ b/drivers/staging/irda/drivers/sir_dongle.c
@@ -0,0 +1,133 @@
+/*********************************************************************
+ *
+ * sir_dongle.c: manager for serial dongle protocol drivers
+ *
+ * Copyright (c) 2002 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/mutex.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+/**************************************************************************
+ *
+ * dongle registration and attachment
+ *
+ */
+
+static LIST_HEAD(dongle_list); /* list of registered dongle drivers */
+static DEFINE_MUTEX(dongle_list_lock); /* protects the list */
+
+int irda_register_dongle(struct dongle_driver *new)
+{
+ struct list_head *entry;
+ struct dongle_driver *drv;
+
+ pr_debug("%s : registering dongle \"%s\" (%d).\n",
+ __func__, new->driver_name, new->type);
+
+ mutex_lock(&dongle_list_lock);
+ list_for_each(entry, &dongle_list) {
+ drv = list_entry(entry, struct dongle_driver, dongle_list);
+ if (new->type == drv->type) {
+ mutex_unlock(&dongle_list_lock);
+ return -EEXIST;
+ }
+ }
+ list_add(&new->dongle_list, &dongle_list);
+ mutex_unlock(&dongle_list_lock);
+ return 0;
+}
+EXPORT_SYMBOL(irda_register_dongle);
+
+int irda_unregister_dongle(struct dongle_driver *drv)
+{
+ mutex_lock(&dongle_list_lock);
+ list_del(&drv->dongle_list);
+ mutex_unlock(&dongle_list_lock);
+ return 0;
+}
+EXPORT_SYMBOL(irda_unregister_dongle);
+
+int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type)
+{
+ struct list_head *entry;
+ const struct dongle_driver *drv = NULL;
+ int err = -EINVAL;
+
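+	/* give kmod a chance to load the dongle protocol driver on demand
+	 * before we search the list of registered dongles
+	 */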
+ request_module("irda-dongle-%d", type);
+
+ if (dev->dongle_drv != NULL)
+ return -EBUSY;
+
+ /* serialize access to the list of registered dongles */
+ mutex_lock(&dongle_list_lock);
+
+ list_for_each(entry, &dongle_list) {
+ drv = list_entry(entry, struct dongle_driver, dongle_list);
+ if (drv->type == type)
+ break;
+ else
+ drv = NULL;
+ }
+
+ if (!drv) {
+ err = -ENODEV;
+ goto out_unlock; /* no such dongle */
+ }
+
+ /* handling of SMP races with dongle module removal - three cases:
+ * 1) dongle driver was already unregistered - then we haven't found the
+ * requested dongle above and are already out here
+ * 2) the module is already marked deleted but the driver is still
+ * registered - then the try_module_get() below will fail
+ * 3) the try_module_get() below succeeds before the module is marked
+ * deleted - then sys_delete_module() fails and prevents the removal
+ * because the module is in use.
+ */
+
+ if (!try_module_get(drv->owner)) {
+ err = -ESTALE;
+ goto out_unlock; /* rmmod already pending */
+ }
+ dev->dongle_drv = drv;
+
+ if (!drv->open || (err=drv->open(dev))!=0)
+ goto out_reject; /* failed to open driver */
+
+ mutex_unlock(&dongle_list_lock);
+ return 0;
+
+out_reject:
+ dev->dongle_drv = NULL;
+ module_put(drv->owner);
+out_unlock:
+ mutex_unlock(&dongle_list_lock);
+ return err;
+}
+
+int sirdev_put_dongle(struct sir_dev *dev)
+{
+ const struct dongle_driver *drv = dev->dongle_drv;
+
+ if (drv) {
+ if (drv->close)
+ drv->close(dev); /* close this dongle instance */
+
+ dev->dongle_drv = NULL; /* unlink the dongle driver */
+ module_put(drv->owner);/* decrement driver's module refcount */
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/irda/drivers/smsc-ircc2.c b/drivers/staging/irda/drivers/smsc-ircc2.c
new file mode 100644
index 000000000000..19a55cba6beb
--- /dev/null
+++ b/drivers/staging/irda/drivers/smsc-ircc2.c
@@ -0,0 +1,3026 @@
+/*********************************************************************
+ *
+ * Description: Driver for the SMC Infrared Communications Controller
+ * Author: Daniele Peri (peri@csai.unipa.it)
+ * Created at:
+ * Modified at:
+ * Modified by:
+ *
+ * Copyright (c) 2002 Daniele Peri
+ * All Rights Reserved.
+ * Copyright (c) 2002 Jean Tourrilhes
+ * Copyright (c) 2006 Linus Walleij
+ *
+ *
+ * Based on smc-ircc.c:
+ *
+ * Copyright (c) 2001 Stefani Seibold
+ * Copyright (c) 1999-2001 Dag Brattli
+ * Copyright (c) 1998-1999 Thomas Davis,
+ *
+ * and irport.c:
+ *
+ * Copyright (c) 1997, 1998, 1999-2000 Dag Brattli, All Rights Reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/rtnetlink.h>
+#include <linux/serial_reg.h>
+#include <linux/dma-mapping.h>
+#include <linux/pnp.h>
+#include <linux/platform_device.h>
+#include <linux/gfp.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+#ifdef CONFIG_PCI
+#include <linux/pci.h>
+#endif
+
+#include <net/irda/wrapper.h>
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include "smsc-ircc2.h"
+#include "smsc-sio.h"
+
+
+MODULE_AUTHOR("Daniele Peri <peri@csai.unipa.it>");
+MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver");
+MODULE_LICENSE("GPL");
+
+static bool smsc_nopnp = true;
+module_param_named(nopnp, smsc_nopnp, bool, 0);
+MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings, defaults to true");
+
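+/* 255 means "not given on the command line"; the values read from the chip are used then */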
+#define DMA_INVAL 255
+static int ircc_dma = DMA_INVAL;
+module_param_hw(ircc_dma, int, dma, 0);
+MODULE_PARM_DESC(ircc_dma, "DMA channel");
+
+#define IRQ_INVAL 255
+static int ircc_irq = IRQ_INVAL;
+module_param_hw(ircc_irq, int, irq, 0);
+MODULE_PARM_DESC(ircc_irq, "IRQ line");
+
+static int ircc_fir;
+module_param_hw(ircc_fir, int, ioport, 0);
+MODULE_PARM_DESC(ircc_fir, "FIR Base Address");
+
+static int ircc_sir;
+module_param_hw(ircc_sir, int, ioport, 0);
+MODULE_PARM_DESC(ircc_sir, "SIR Base Address");
+
+static int ircc_cfg;
+module_param_hw(ircc_cfg, int, ioport, 0);
+MODULE_PARM_DESC(ircc_cfg, "Configuration register base address");
+
+static int ircc_transceiver;
+module_param(ircc_transceiver, int, 0);
+MODULE_PARM_DESC(ircc_transceiver, "Transceiver type");
+
+/* Types */
+
+#ifdef CONFIG_PCI
+struct smsc_ircc_subsystem_configuration {
+ unsigned short vendor; /* PCI vendor ID */
+	unsigned short device; /* PCI device ID */
+ unsigned short subvendor; /* PCI subsystem vendor ID */
+ unsigned short subdevice; /* PCI subsystem device ID */
+ unsigned short sir_io; /* I/O port for SIR */
+ unsigned short fir_io; /* I/O port for FIR */
+ unsigned char fir_irq; /* FIR IRQ */
+ unsigned char fir_dma; /* FIR DMA */
+ unsigned short cfg_base; /* I/O port for chip configuration */
+ int (*preconfigure)(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf); /* Preconfig function */
+ const char *name; /* name shown as info */
+};
+#endif
+
+struct smsc_transceiver {
+ char *name;
+ void (*set_for_speed)(int fir_base, u32 speed);
+ int (*probe)(int fir_base);
+};
+
+struct smsc_chip {
+ char *name;
+ #if 0
+ u8 type;
+ #endif
+ u16 flags;
+ u8 devid;
+ u8 rev;
+};
+
+struct smsc_chip_address {
+ unsigned int cfg_base;
+ unsigned int type;
+};
+
+/* Private data for each instance */
+struct smsc_ircc_cb {
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+	struct irlap_cb *irlap; /* The link layer we are bound to */
+
+ chipio_t io; /* IrDA controller information */
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+ dma_addr_t tx_buff_dma;
+ dma_addr_t rx_buff_dma;
+
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ spinlock_t lock; /* For serializing operations */
+
+ __u32 new_speed;
+ __u32 flags; /* Interface flags */
+
+ int tx_buff_offsets[10]; /* Offsets between frames in tx_buff */
+ int tx_len; /* Number of frames in tx_buff */
+
+ int transceiver;
+ struct platform_device *pldev;
+};
+
+/* Constants */
+
+#define SMSC_IRCC2_DRIVER_NAME "smsc-ircc2"
+
+#define SMSC_IRCC2_C_IRDA_FALLBACK_SPEED 9600
+#define SMSC_IRCC2_C_DEFAULT_TRANSCEIVER 1
+#define SMSC_IRCC2_C_NET_TIMEOUT 0
+#define SMSC_IRCC2_C_SIR_STOP 0
+
+static const char *driver_name = SMSC_IRCC2_DRIVER_NAME;
+
+/* Prototypes */
+
+static int smsc_ircc_open(unsigned int firbase, unsigned int sirbase, u8 dma, u8 irq);
+static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base);
+static void smsc_ircc_setup_io(struct smsc_ircc_cb *self, unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq);
+static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self);
+static void smsc_ircc_init_chip(struct smsc_ircc_cb *self);
+static int __exit smsc_ircc_close(struct smsc_ircc_cb *self);
+static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self);
+static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self);
+static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self);
+static netdev_tx_t smsc_ircc_hard_xmit_sir(struct sk_buff *skb,
+ struct net_device *dev);
+static netdev_tx_t smsc_ircc_hard_xmit_fir(struct sk_buff *skb,
+ struct net_device *dev);
+static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int bofs);
+static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self);
+static void smsc_ircc_change_speed(struct smsc_ircc_cb *self, u32 speed);
+static void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, u32 speed);
+static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id);
+static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev);
+static void smsc_ircc_sir_start(struct smsc_ircc_cb *self);
+#if SMSC_IRCC2_C_SIR_STOP
+static void smsc_ircc_sir_stop(struct smsc_ircc_cb *self);
+#endif
+static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self);
+static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len);
+static int smsc_ircc_net_open(struct net_device *dev);
+static int smsc_ircc_net_close(struct net_device *dev);
+static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#if SMSC_IRCC2_C_NET_TIMEOUT
+static void smsc_ircc_timeout(struct net_device *dev);
+#endif
+static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self);
+static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self);
+static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed);
+static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self);
+
+/* Probing */
+static int __init smsc_ircc_look_for_chips(void);
+static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
+static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
+static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
+static int __init smsc_superio_fdc(unsigned short cfg_base);
+static int __init smsc_superio_lpc(unsigned short cfg_base);
+#ifdef CONFIG_PCI
+static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
+static int __init preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
+static void __init preconfigure_ali_port(struct pci_dev *dev,
+ unsigned short port);
+static int __init preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
+static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
+ unsigned short ircc_fir,
+ unsigned short ircc_sir,
+ unsigned char ircc_dma,
+ unsigned char ircc_irq);
+#endif
+
+/* Transceivers specific functions */
+
+static void smsc_ircc_set_transceiver_toshiba_sat1800(int fir_base, u32 speed);
+static int smsc_ircc_probe_transceiver_toshiba_sat1800(int fir_base);
+static void smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(int fir_base, u32 speed);
+static int smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(int fir_base);
+static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed);
+static int smsc_ircc_probe_transceiver_smsc_ircc_atc(int fir_base);
+
+/* Power Management */
+
+static int smsc_ircc_suspend(struct platform_device *dev, pm_message_t state);
+static int smsc_ircc_resume(struct platform_device *dev);
+
+static struct platform_driver smsc_ircc_driver = {
+ .suspend = smsc_ircc_suspend,
+ .resume = smsc_ircc_resume,
+ .driver = {
+ .name = SMSC_IRCC2_DRIVER_NAME,
+ },
+};
+
+/* Transceivers for SMSC-ircc */
+
+static struct smsc_transceiver smsc_transceivers[] =
+{
+ { "Toshiba Satellite 1800 (GP data pin select)", smsc_ircc_set_transceiver_toshiba_sat1800, smsc_ircc_probe_transceiver_toshiba_sat1800 },
+ { "Fast pin select", smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select, smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select },
+ { "ATC IRMode", smsc_ircc_set_transceiver_smsc_ircc_atc, smsc_ircc_probe_transceiver_smsc_ircc_atc },
+ { NULL, NULL }
+};
+#define SMSC_IRCC2_C_NUMBER_OF_TRANSCEIVERS (ARRAY_SIZE(smsc_transceivers) - 1)
+
+/* SMC SuperIO chipsets definitions */
+
+#define KEY55_1 0 /* SuperIO Configuration mode with Key <0x55> */
+#define KEY55_2 1 /* SuperIO Configuration mode with Key <0x55,0x55> */
+#define NoIRDA 2 /* SuperIO Chip has no IRDA Port */
+#define SIR 0 /* SuperIO Chip has only slow IRDA */
+#define FIR 4 /* SuperIO Chip has fast IRDA */
+#define SERx4 8 /* SuperIO Chip supports 115.2 kBaud * 4 = 460.8 kBaud */
+
+static struct smsc_chip __initdata fdc_chips_flat[] =
+{
+ /* Base address 0x3f0 or 0x370 */
+ { "37C44", KEY55_1|NoIRDA, 0x00, 0x00 }, /* This chip cannot be detected */
+ { "37C665GT", KEY55_2|NoIRDA, 0x65, 0x01 },
+ { "37C665GT", KEY55_2|NoIRDA, 0x66, 0x01 },
+ { "37C669", KEY55_2|SIR|SERx4, 0x03, 0x02 },
+ { "37C669", KEY55_2|SIR|SERx4, 0x04, 0x02 }, /* ID? */
+ { "37C78", KEY55_2|NoIRDA, 0x78, 0x00 },
+ { "37N769", KEY55_1|FIR|SERx4, 0x28, 0x00 },
+ { "37N869", KEY55_1|FIR|SERx4, 0x29, 0x00 },
+ { NULL }
+};
+
+static struct smsc_chip __initdata fdc_chips_paged[] =
+{
+ /* Base address 0x3f0 or 0x370 */
+ { "37B72X", KEY55_1|SIR|SERx4, 0x4c, 0x00 },
+ { "37B77X", KEY55_1|SIR|SERx4, 0x43, 0x00 },
+ { "37B78X", KEY55_1|SIR|SERx4, 0x44, 0x00 },
+ { "37B80X", KEY55_1|SIR|SERx4, 0x42, 0x00 },
+ { "37C67X", KEY55_1|FIR|SERx4, 0x40, 0x00 },
+ { "37C93X", KEY55_2|SIR|SERx4, 0x02, 0x01 },
+ { "37C93XAPM", KEY55_1|SIR|SERx4, 0x30, 0x01 },
+ { "37C93XFR", KEY55_2|FIR|SERx4, 0x03, 0x01 },
+ { "37M707", KEY55_1|SIR|SERx4, 0x42, 0x00 },
+ { "37M81X", KEY55_1|SIR|SERx4, 0x4d, 0x00 },
+ { "37N958FR", KEY55_1|FIR|SERx4, 0x09, 0x04 },
+ { "37N971", KEY55_1|FIR|SERx4, 0x0a, 0x00 },
+ { "37N972", KEY55_1|FIR|SERx4, 0x0b, 0x00 },
+ { NULL }
+};
+
+static struct smsc_chip __initdata lpc_chips_flat[] =
+{
+ /* Base address 0x2E or 0x4E */
+ { "47N227", KEY55_1|FIR|SERx4, 0x5a, 0x00 },
+ { "47N227", KEY55_1|FIR|SERx4, 0x7a, 0x00 },
+ { "47N267", KEY55_1|FIR|SERx4, 0x5e, 0x00 },
+ { NULL }
+};
+
+static struct smsc_chip __initdata lpc_chips_paged[] =
+{
+ /* Base address 0x2E or 0x4E */
+ { "47B27X", KEY55_1|SIR|SERx4, 0x51, 0x00 },
+ { "47B37X", KEY55_1|SIR|SERx4, 0x52, 0x00 },
+ { "47M10X", KEY55_1|SIR|SERx4, 0x59, 0x00 },
+ { "47M120", KEY55_1|NoIRDA|SERx4, 0x5c, 0x00 },
+ { "47M13X", KEY55_1|SIR|SERx4, 0x59, 0x00 },
+ { "47M14X", KEY55_1|SIR|SERx4, 0x5f, 0x00 },
+ { "47N252", KEY55_1|FIR|SERx4, 0x0e, 0x00 },
+ { "47S42X", KEY55_1|SIR|SERx4, 0x57, 0x00 },
+ { NULL }
+};
+
+#define SMSCSIO_TYPE_FDC 1
+#define SMSCSIO_TYPE_LPC 2
+#define SMSCSIO_TYPE_FLAT 4
+#define SMSCSIO_TYPE_PAGED 8
+
+static struct smsc_chip_address __initdata possible_addresses[] =
+{
+ { 0x3f0, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
+ { 0x370, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
+ { 0xe0, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
+ { 0x2e, SMSCSIO_TYPE_LPC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
+ { 0x4e, SMSCSIO_TYPE_LPC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
+ { 0, 0 }
+};
+
+/* Globals */
+
+static struct smsc_ircc_cb *dev_self[] = { NULL, NULL };
+static unsigned short dev_count;
+
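+/* select one of the chip's register banks by writing the bank number
+ * into the low bits of the MASTER register
+ */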
+static inline void register_bank(int iobase, int bank)
+{
+ outb(((inb(iobase + IRCC_MASTER) & 0xf0) | (bank & 0x07)),
+ iobase + IRCC_MASTER);
+}
+
+/* PNP hotplug support */
+static const struct pnp_device_id smsc_ircc_pnp_table[] = {
+ { .id = "SMCf010", .driver_data = 0 },
+ /* and presumably others */
+ { }
+};
+MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table);
+
+static int pnp_driver_registered;
+
+#ifdef CONFIG_PNP
+static int smsc_ircc_pnp_probe(struct pnp_dev *dev,
+ const struct pnp_device_id *dev_id)
+{
+ unsigned int firbase, sirbase;
+ u8 dma, irq;
+
+ if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
+ pnp_dma_valid(dev, 0) && pnp_irq_valid(dev, 0)))
+ return -EINVAL;
+
+ sirbase = pnp_port_start(dev, 0);
+ firbase = pnp_port_start(dev, 1);
+ dma = pnp_dma(dev, 0);
+ irq = pnp_irq(dev, 0);
+
+ if (smsc_ircc_open(firbase, sirbase, dma, irq))
+ return -ENODEV;
+
+ return 0;
+}
+
+static struct pnp_driver smsc_ircc_pnp_driver = {
+ .name = "smsc-ircc2",
+ .id_table = smsc_ircc_pnp_table,
+ .probe = smsc_ircc_pnp_probe,
+};
+#else /* CONFIG_PNP */
+static struct pnp_driver smsc_ircc_pnp_driver;
+#endif
+
+/*******************************************************************************
+ *
+ *
+ * SMSC-ircc stuff
+ *
+ *
+ *******************************************************************************/
+
+static int __init smsc_ircc_legacy_probe(void)
+{
+ int ret = 0;
+
+#ifdef CONFIG_PCI
+ if (smsc_ircc_preconfigure_subsystems(ircc_cfg, ircc_fir, ircc_sir, ircc_dma, ircc_irq) < 0) {
+ /* Ignore errors from preconfiguration */
+		net_err_ratelimited("%s, Preconfiguration failed!\n",
+ driver_name);
+ }
+#endif
+
+ if (ircc_fir > 0 && ircc_sir > 0) {
+ net_info_ratelimited(" Overriding FIR address 0x%04x\n",
+ ircc_fir);
+ net_info_ratelimited(" Overriding SIR address 0x%04x\n",
+ ircc_sir);
+
+ if (smsc_ircc_open(ircc_fir, ircc_sir, ircc_dma, ircc_irq))
+ ret = -ENODEV;
+ } else {
+ ret = -ENODEV;
+
+ /* try user provided configuration register base address */
+ if (ircc_cfg > 0) {
+ net_info_ratelimited(" Overriding configuration address 0x%04x\n",
+ ircc_cfg);
+ if (!smsc_superio_fdc(ircc_cfg))
+ ret = 0;
+ if (!smsc_superio_lpc(ircc_cfg))
+ ret = 0;
+ }
+
+ if (smsc_ircc_look_for_chips() > 0)
+ ret = 0;
+ }
+ return ret;
+}
+
+/*
+ * Function smsc_ircc_init ()
+ *
+ * Initialize chip. Just try to find out how many chips we are dealing with
+ * and where they are
+ */
+static int __init smsc_ircc_init(void)
+{
+ int ret;
+
+ pr_debug("%s\n", __func__);
+
+ ret = platform_driver_register(&smsc_ircc_driver);
+ if (ret) {
+ net_err_ratelimited("%s, Can't register driver!\n",
+ driver_name);
+ return ret;
+ }
+
+ dev_count = 0;
+
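+	/* use the legacy, hardwired probe if PNP is disabled or any resource
+	 * was given on the command line; otherwise rely on PNP detection
+	 */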
+ if (smsc_nopnp || !pnp_platform_devices ||
+ ircc_cfg || ircc_fir || ircc_sir ||
+ ircc_dma != DMA_INVAL || ircc_irq != IRQ_INVAL) {
+ ret = smsc_ircc_legacy_probe();
+ } else {
+ if (pnp_register_driver(&smsc_ircc_pnp_driver) == 0)
+ pnp_driver_registered = 1;
+ }
+
+ if (ret) {
+ if (pnp_driver_registered)
+ pnp_unregister_driver(&smsc_ircc_pnp_driver);
+ platform_driver_unregister(&smsc_ircc_driver);
+ }
+
+ return ret;
+}
+
+static netdev_tx_t smsc_ircc_net_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct smsc_ircc_cb *self = netdev_priv(dev);
+
+ if (self->io.speed > 115200)
+ return smsc_ircc_hard_xmit_fir(skb, dev);
+ else
+ return smsc_ircc_hard_xmit_sir(skb, dev);
+}
+
+static const struct net_device_ops smsc_ircc_netdev_ops = {
+ .ndo_open = smsc_ircc_net_open,
+ .ndo_stop = smsc_ircc_net_close,
+ .ndo_do_ioctl = smsc_ircc_net_ioctl,
+ .ndo_start_xmit = smsc_ircc_net_xmit,
+#if SMSC_IRCC2_C_NET_TIMEOUT
+ .ndo_tx_timeout = smsc_ircc_timeout,
+#endif
+};
+
+/*
+ * Function smsc_ircc_open (firbase, sirbase, dma, irq)
+ *
+ * Try to open driver instance
+ *
+ */
+static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
+{
+ struct smsc_ircc_cb *self;
+ struct net_device *dev;
+ int err;
+
+ pr_debug("%s\n", __func__);
+
+ err = smsc_ircc_present(fir_base, sir_base);
+ if (err)
+ goto err_out;
+
+ err = -ENOMEM;
+ if (dev_count >= ARRAY_SIZE(dev_self)) {
+ net_warn_ratelimited("%s(), too many devices!\n", __func__);
+ goto err_out1;
+ }
+
+ /*
+ * Allocate new instance of the driver
+ */
+ dev = alloc_irdadev(sizeof(struct smsc_ircc_cb));
+ if (!dev) {
+ net_warn_ratelimited("%s() can't allocate net device\n",
+ __func__);
+ goto err_out1;
+ }
+
+#if SMSC_IRCC2_C_NET_TIMEOUT
+ dev->watchdog_timeo = HZ * 2; /* Allow enough time for speed change */
+#endif
+ dev->netdev_ops = &smsc_ircc_netdev_ops;
+
+ self = netdev_priv(dev);
+ self->netdev = dev;
+
+ /* Make ifconfig display some details */
+ dev->base_addr = self->io.fir_base = fir_base;
+ dev->irq = self->io.irq = irq;
+
+ /* Need to store self somewhere */
+ dev_self[dev_count] = self;
+ spin_lock_init(&self->lock);
+
+ self->rx_buff.truesize = SMSC_IRCC2_RX_BUFF_TRUESIZE;
+ self->tx_buff.truesize = SMSC_IRCC2_TX_BUFF_TRUESIZE;
+
+ self->rx_buff.head =
+ dma_zalloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
+ if (self->rx_buff.head == NULL)
+ goto err_out2;
+
+ self->tx_buff.head =
+ dma_zalloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
+ if (self->tx_buff.head == NULL)
+ goto err_out3;
+
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->tx_buff.data = self->tx_buff.head;
+ self->rx_buff.data = self->rx_buff.head;
+
+ smsc_ircc_setup_io(self, fir_base, sir_base, dma, irq);
+ smsc_ircc_setup_qos(self);
+ smsc_ircc_init_chip(self);
+
+ if (ircc_transceiver > 0 &&
+ ircc_transceiver < SMSC_IRCC2_C_NUMBER_OF_TRANSCEIVERS)
+ self->transceiver = ircc_transceiver;
+ else
+ smsc_ircc_probe_transceiver(self);
+
+ err = register_netdev(self->netdev);
+ if (err) {
+ net_err_ratelimited("%s, Network device registration failed!\n",
+ driver_name);
+ goto err_out4;
+ }
+
+ self->pldev = platform_device_register_simple(SMSC_IRCC2_DRIVER_NAME,
+ dev_count, NULL, 0);
+ if (IS_ERR(self->pldev)) {
+ err = PTR_ERR(self->pldev);
+ goto err_out5;
+ }
+ platform_set_drvdata(self->pldev, self);
+
+ net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
+ dev_count++;
+
+ return 0;
+
+ err_out5:
+ unregister_netdev(self->netdev);
+
+ err_out4:
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+ err_out3:
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+ err_out2:
+ free_netdev(self->netdev);
+ dev_self[dev_count] = NULL;
+ err_out1:
+ release_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT);
+ release_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT);
+ err_out:
+ return err;
+}
+
+/*
+ * Function smsc_ircc_present(fir_base, sir_base)
+ *
+ * Check the smsc-ircc chip presence
+ *
+ */
+static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
+{
+ unsigned char low, high, chip, config, dma, irq, version;
+
+ if (!request_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT,
+ driver_name)) {
+ net_warn_ratelimited("%s: can't get fir_base of 0x%03x\n",
+ __func__, fir_base);
+ goto out1;
+ }
+
+ if (!request_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT,
+ driver_name)) {
+ net_warn_ratelimited("%s: can't get sir_base of 0x%03x\n",
+ __func__, sir_base);
+ goto out2;
+ }
+
+ register_bank(fir_base, 3);
+
+ high = inb(fir_base + IRCC_ID_HIGH);
+ low = inb(fir_base + IRCC_ID_LOW);
+ chip = inb(fir_base + IRCC_CHIP_ID);
+ version = inb(fir_base + IRCC_VERSION);
+ config = inb(fir_base + IRCC_INTERFACE);
+ dma = config & IRCC_INTERFACE_DMA_MASK;
+ irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4;
+
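+	/* sanity check: the ID registers should read back 0x10b8 (presumably
+	 * the SMSC vendor code) and one of the known chip ids (0xf1 or 0xf2)
+	 */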
+ if (high != 0x10 || low != 0xb8 || (chip != 0xf1 && chip != 0xf2)) {
+ net_warn_ratelimited("%s(), addr 0x%04x - no device found!\n",
+ __func__, fir_base);
+ goto out3;
+ }
+ net_info_ratelimited("SMsC IrDA Controller found\n IrCC version %d.%d, firport 0x%03x, sirport 0x%03x dma=%d, irq=%d\n",
+ chip & 0x0f, version,
+ fir_base, sir_base, dma, irq);
+
+ return 0;
+
+ out3:
+ release_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT);
+ out2:
+ release_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT);
+ out1:
+ return -ENODEV;
+}
+
+/*
+ * Function smsc_ircc_setup_io(self, fir_base, sir_base, dma, irq)
+ *
+ * Setup I/O
+ *
+ */
+static void smsc_ircc_setup_io(struct smsc_ircc_cb *self,
+ unsigned int fir_base, unsigned int sir_base,
+ u8 dma, u8 irq)
+{
+ unsigned char config, chip_dma, chip_irq;
+
+ register_bank(fir_base, 3);
+ config = inb(fir_base + IRCC_INTERFACE);
+ chip_dma = config & IRCC_INTERFACE_DMA_MASK;
+ chip_irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4;
+
+ self->io.fir_base = fir_base;
+ self->io.sir_base = sir_base;
+ self->io.fir_ext = SMSC_IRCC2_FIR_CHIP_IO_EXTENT;
+ self->io.sir_ext = SMSC_IRCC2_SIR_CHIP_IO_EXTENT;
+ self->io.fifo_size = SMSC_IRCC2_FIFO_SIZE;
+ self->io.speed = SMSC_IRCC2_C_IRDA_FALLBACK_SPEED;
+
+ if (irq != IRQ_INVAL) {
+ if (irq != chip_irq)
+ net_info_ratelimited("%s, Overriding IRQ - chip says %d, using %d\n",
+ driver_name, chip_irq, irq);
+ self->io.irq = irq;
+ } else
+ self->io.irq = chip_irq;
+
+ if (dma != DMA_INVAL) {
+ if (dma != chip_dma)
+ net_info_ratelimited("%s, Overriding DMA - chip says %d, using %d\n",
+ driver_name, chip_dma, dma);
+ self->io.dma = dma;
+ } else
+ self->io.dma = chip_dma;
+
+}
+
+/*
+ * Function smsc_ircc_setup_qos(self)
+ *
+ * Setup qos
+ *
+ */
+static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self)
+{
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+ self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
+ IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
+
+ self->qos.min_turn_time.bits = SMSC_IRCC2_MIN_TURN_TIME;
+ self->qos.window_size.bits = SMSC_IRCC2_WINDOW_SIZE;
+ irda_qos_bits_to_value(&self->qos);
+}
+
+/*
+ * Function smsc_ircc_init_chip(self)
+ *
+ * Init chip
+ *
+ */
+static void smsc_ircc_init_chip(struct smsc_ircc_cb *self)
+{
+ int iobase = self->io.fir_base;
+
+ register_bank(iobase, 0);
+ outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
+ outb(0x00, iobase + IRCC_MASTER);
+
+ register_bank(iobase, 1);
+ outb(((inb(iobase + IRCC_SCE_CFGA) & 0x87) | IRCC_CFGA_IRDA_SIR_A),
+ iobase + IRCC_SCE_CFGA);
+
+#ifdef smsc_669 /* Uses pin 88/89 for Rx/Tx */
+ outb(((inb(iobase + IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_COM),
+ iobase + IRCC_SCE_CFGB);
+#else
+ outb(((inb(iobase + IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_IR),
+ iobase + IRCC_SCE_CFGB);
+#endif
+ (void) inb(iobase + IRCC_FIFO_THRESHOLD);
+ outb(SMSC_IRCC2_FIFO_THRESHOLD, iobase + IRCC_FIFO_THRESHOLD);
+
+ register_bank(iobase, 4);
+ outb((inb(iobase + IRCC_CONTROL) & 0x30), iobase + IRCC_CONTROL);
+
+ register_bank(iobase, 0);
+ outb(0, iobase + IRCC_LCR_A);
+
+ smsc_ircc_set_sir_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
+
+ /* Power on device */
+ outb(0x00, iobase + IRCC_MASTER);
+}
+
+/*
+ * Function smsc_ircc_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct smsc_ircc_cb *self;
+ unsigned long flags;
+ int ret = 0;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else {
+ /* Make sure we are the only one touching
+ * self->io.speed and the hardware - Jean II */
+ spin_lock_irqsave(&self->lock, flags);
+ smsc_ircc_change_speed(self, irq->ifr_baudrate);
+ spin_unlock_irqrestore(&self->lock, flags);
+ }
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = smsc_ircc_is_receiving(self);
+ break;
+ #if 0
+ case SIOCSDTRRTS:
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ smsc_ircc_sir_set_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
+ break;
+ #endif
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+#if SMSC_IRCC2_C_NET_TIMEOUT
+/*
+ * Function smsc_ircc_timeout (struct net_device *dev)
+ *
+ * The networking timeout management.
+ *
+ */
+
+static void smsc_ircc_timeout(struct net_device *dev)
+{
+ struct smsc_ircc_cb *self = netdev_priv(dev);
+ unsigned long flags;
+
+ net_warn_ratelimited("%s: transmit timed out, changing speed to: %d\n",
+ dev->name, self->io.speed);
+ spin_lock_irqsave(&self->lock, flags);
+ smsc_ircc_sir_start(self);
+ smsc_ircc_change_speed(self, self->io.speed);
+ netif_trans_update(dev); /* prevent tx timeout */
+ netif_wake_queue(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+#endif
+
+/*
+ * Function smsc_ircc_hard_xmit_sir (struct sk_buff *skb, struct net_device *dev)
+ *
+ * Transmits the current frame until FIFO is full, then
+ * waits until the next transmit interrupt, and continues until the
+ * frame is transmitted.
+ */
+static netdev_tx_t smsc_ircc_hard_xmit_sir(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct smsc_ircc_cb *self;
+ unsigned long flags;
+ s32 speed;
+
+ pr_debug("%s\n", __func__);
+
+ IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
+
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
+
+ netif_stop_queue(dev);
+
+ /* Make sure test of self->io.speed & speed change are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if (speed != self->io.speed && speed != -1) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ /*
+ * We send frames one by one in SIR mode (no
+ * pipelining), so at this point, if we were sending
+ * a previous frame, we just received the interrupt
+ * telling us it is finished (UART_IIR_THRI).
+ * Therefore, waiting for the transmitter to really
+ * finish draining the fifo won't take too long.
+ * And the interrupt handler is not expected to run.
+ * - Jean II */
+ smsc_ircc_sir_wait_hw_transmitter_finish(self);
+ smsc_ircc_change_speed(self, speed);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ self->new_speed = speed;
+ }
+
+ /* Init tx buffer */
+ self->tx_buff.data = self->tx_buff.head;
+
+ /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ dev->stats.tx_bytes += self->tx_buff.len;
+
+ /* Turn on transmit finished interrupt. Will fire immediately! */
+ outb(UART_IER_THRI, self->io.sir_base + UART_IER);
+
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Function smsc_ircc_set_fir_speed (self, baud)
+ *
+ * Change the speed of the device
+ *
+ */
+static void smsc_ircc_set_fir_speed(struct smsc_ircc_cb *self, u32 speed)
+{
+ int fir_base, ir_mode, ctrl, fast;
+
+ IRDA_ASSERT(self != NULL, return;);
+ fir_base = self->io.fir_base;
+
+ self->io.speed = speed;
+
+ switch (speed) {
+ default:
+ case 576000:
+ ir_mode = IRCC_CFGA_IRDA_HDLC;
+ ctrl = IRCC_CRC;
+ fast = 0;
+ pr_debug("%s(), handling baud of 576000\n", __func__);
+ break;
+ case 1152000:
+ ir_mode = IRCC_CFGA_IRDA_HDLC;
+ ctrl = IRCC_1152 | IRCC_CRC;
+ fast = IRCC_LCR_A_FAST | IRCC_LCR_A_GP_DATA;
+ pr_debug("%s(), handling baud of 1152000\n",
+ __func__);
+ break;
+ case 4000000:
+ ir_mode = IRCC_CFGA_IRDA_4PPM;
+ ctrl = IRCC_CRC;
+ fast = IRCC_LCR_A_FAST;
+ pr_debug("%s(), handling baud of 4000000\n",
+ __func__);
+ break;
+ }
+ #if 0
+	Now in transceiver!
+ /* This causes an interrupt */
+ register_bank(fir_base, 0);
+ outb((inb(fir_base + IRCC_LCR_A) & 0xbf) | fast, fir_base + IRCC_LCR_A);
+ #endif
+
+ register_bank(fir_base, 1);
+ outb(((inb(fir_base + IRCC_SCE_CFGA) & IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK) | ir_mode), fir_base + IRCC_SCE_CFGA);
+
+ register_bank(fir_base, 4);
+ outb((inb(fir_base + IRCC_CONTROL) & 0x30) | ctrl, fir_base + IRCC_CONTROL);
+}
+
+/*
+ * Function smsc_ircc_fir_start(self)
+ *
+ * Start FIR operation: select the IR TX/RX interface and enable the FIR interrupts
+ *
+ */
+static void smsc_ircc_fir_start(struct smsc_ircc_cb *self)
+{
+ struct net_device *dev;
+ int fir_base;
+
+ pr_debug("%s\n", __func__);
+
+ IRDA_ASSERT(self != NULL, return;);
+ dev = self->netdev;
+ IRDA_ASSERT(dev != NULL, return;);
+
+ fir_base = self->io.fir_base;
+
+ /* Reset everything */
+
+ /* Clear FIFO */
+ outb(inb(fir_base + IRCC_LCR_A) | IRCC_LCR_A_FIFO_RESET, fir_base + IRCC_LCR_A);
+
+ /* Enable interrupt */
+ /*outb(IRCC_IER_ACTIVE_FRAME|IRCC_IER_EOM, fir_base + IRCC_IER);*/
+
+ register_bank(fir_base, 1);
+
+ /* Select the TX/RX interface */
+#ifdef SMSC_669 /* Uses pin 88/89 for Rx/Tx */
+ outb(((inb(fir_base + IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_COM),
+ fir_base + IRCC_SCE_CFGB);
+#else
+ outb(((inb(fir_base + IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_IR),
+ fir_base + IRCC_SCE_CFGB);
+#endif
+ (void) inb(fir_base + IRCC_FIFO_THRESHOLD);
+
+ /* Enable SCE interrupts */
+ outb(0, fir_base + IRCC_MASTER);
+ register_bank(fir_base, 0);
+ outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, fir_base + IRCC_IER);
+ outb(IRCC_MASTER_INT_EN, fir_base + IRCC_MASTER);
+}
+
+/*
+ * Function smsc_ircc_fir_stop(self)
+ *
+ * Stop FIR operation (disable the FIR receive logic)
+ *
+ */
+static void smsc_ircc_fir_stop(struct smsc_ircc_cb *self)
+{
+ int fir_base;
+
+ pr_debug("%s\n", __func__);
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ fir_base = self->io.fir_base;
+ register_bank(fir_base, 0);
+ /*outb(IRCC_MASTER_RESET, fir_base + IRCC_MASTER);*/
+ outb(inb(fir_base + IRCC_LCR_B) & IRCC_LCR_B_SIP_ENABLE, fir_base + IRCC_LCR_B);
+}
+
+
+/*
+ * Function smsc_ircc_change_speed(self, baud)
+ *
+ * Change the speed of the device
+ *
+ * This function *must* be called with spinlock held, because it may
+ * be called from the irq handler. - Jean II
+ */
+static void smsc_ircc_change_speed(struct smsc_ircc_cb *self, u32 speed)
+{
+ struct net_device *dev;
+ int last_speed_was_sir;
+
+ pr_debug("%s() changing speed to: %d\n", __func__, speed);
+
+ IRDA_ASSERT(self != NULL, return;);
+ dev = self->netdev;
+
+ last_speed_was_sir = self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED;
+
+ #if 0
+ /* Temp Hack */
+ speed= 1152000;
+ self->io.speed = speed;
+ last_speed_was_sir = 0;
+ smsc_ircc_fir_start(self);
+ #endif
+
+ if (self->io.speed == 0)
+ smsc_ircc_sir_start(self);
+
+ #if 0
+ if (!last_speed_was_sir) speed = self->io.speed;
+ #endif
+
+ if (self->io.speed != speed)
+ smsc_ircc_set_transceiver_for_speed(self, speed);
+
+ self->io.speed = speed;
+
+ if (speed <= SMSC_IRCC2_MAX_SIR_SPEED) {
+ if (!last_speed_was_sir) {
+ smsc_ircc_fir_stop(self);
+ smsc_ircc_sir_start(self);
+ }
+ smsc_ircc_set_sir_speed(self, speed);
+ } else {
+ if (last_speed_was_sir) {
+ #if SMSC_IRCC2_C_SIR_STOP
+ smsc_ircc_sir_stop(self);
+ #endif
+ smsc_ircc_fir_start(self);
+ }
+ smsc_ircc_set_fir_speed(self, speed);
+
+ #if 0
+ self->tx_buff.len = 10;
+ self->tx_buff.data = self->tx_buff.head;
+
+ smsc_ircc_dma_xmit(self, 4000);
+ #endif
+ /* Be ready for incoming frames */
+ smsc_ircc_dma_receive(self);
+ }
+
+ netif_wake_queue(dev);
+}
+
+/*
+ * Function smsc_ircc_set_sir_speed (self, speed)
+ *
+ * Set speed of IrDA port to specified baudrate
+ *
+ */
+static void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
+{
+ int iobase;
+ int fcr; /* FIFO control reg */
+ int lcr; /* Line control reg */
+ int divisor;
+
+ pr_debug("%s(), Setting speed to: %d\n", __func__, speed);
+
+ IRDA_ASSERT(self != NULL, return;);
+ iobase = self->io.sir_base;
+
+ /* Update accounting for new speed */
+ self->io.speed = speed;
+
+ /* Turn off interrupts */
+ outb(0, iobase + UART_IER);
+
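+	/* standard 16550 divisor, relative to the maximum SIR rate (presumably 115200 baud) */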
+ divisor = SMSC_IRCC2_MAX_SIR_SPEED / speed;
+
+ fcr = UART_FCR_ENABLE_FIFO;
+
+ /*
+	 * Use trigger level 1 to avoid the 3 ms timeout delay at 9600 bps, and
+	 * almost 1.7 ms at 19200 bps. At speeds above that we can just forget
+ * about this timeout since it will always be fast enough.
+ */
+ fcr |= self->io.speed < 38400 ?
+ UART_FCR_TRIGGER_1 : UART_FCR_TRIGGER_14;
+
+ /* IrDA ports use 8N1 */
+ lcr = UART_LCR_WLEN8;
+
+ outb(UART_LCR_DLAB | lcr, iobase + UART_LCR); /* Set DLAB */
+ outb(divisor & 0xff, iobase + UART_DLL); /* Set speed */
+ outb(divisor >> 8, iobase + UART_DLM);
+ outb(lcr, iobase + UART_LCR); /* Set 8N1 */
+ outb(fcr, iobase + UART_FCR); /* Enable FIFO's */
+
+	/* Turn on interrupts */
+ outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);
+
+ pr_debug("%s() speed changed to: %d\n", __func__, speed);
+}
+
+
+/*
+ * Function smsc_ircc_hard_xmit_fir (skb, dev)
+ *
+ * Transmit the frame!
+ *
+ */
+static netdev_tx_t smsc_ircc_hard_xmit_fir(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct smsc_ircc_cb *self;
+ unsigned long flags;
+ s32 speed;
+ int mtt;
+
+ IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
+
+ netif_stop_queue(dev);
+
+ /* Make sure test of self->io.speed & speed change are atomic */
+ spin_lock_irqsave(&self->lock, flags);
+
+ /* Check if we need to change the speed after this frame */
+ speed = irda_get_next_speed(skb);
+ if (speed != self->io.speed && speed != -1) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ /* Note : you should make sure that speed changes
+ * are not going to corrupt any outgoing frame.
+ * Look at nsc-ircc for the gory details - Jean II */
+ smsc_ircc_change_speed(self, speed);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ self->new_speed = speed;
+ }
+
+ skb_copy_from_linear_data(skb, self->tx_buff.head, skb->len);
+
+ self->tx_buff.len = skb->len;
+ self->tx_buff.data = self->tx_buff.head;
+
+ mtt = irda_get_mtt(skb);
+ if (mtt) {
+ int bofs;
+
+ /*
+ * Compute how many BOFs (STA or PA's) we need to waste the
+ * min turn time given the speed of the link.
+ */
+ bofs = mtt * (self->io.speed / 1000) / 8000;
+ if (bofs > 4095)
+ bofs = 4095;
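+		/* e.g. mtt = 1000 us at 4 Mb/s gives 1000 * 4000 / 8000 =
+		 * 500 extra BOFs, well inside the 12-bit hardware limit. */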
+
+ smsc_ircc_dma_xmit(self, bofs);
+ } else {
+ /* Transmit frame */
+ smsc_ircc_dma_xmit(self, 0);
+ }
+
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Function smsc_ircc_dma_xmit (self, bofs)
+ *
+ * Transmit data using DMA
+ *
+ */
+static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int bofs)
+{
+ int iobase = self->io.fir_base;
+ u8 ctrl;
+
+ pr_debug("%s\n", __func__);
+#if 1
+ /* Disable Rx */
+ register_bank(iobase, 0);
+ outb(0x00, iobase + IRCC_LCR_B);
+#endif
+ register_bank(iobase, 1);
+ outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
+ iobase + IRCC_SCE_CFGB);
+
+ self->io.direction = IO_XMIT;
+
+ /* Set BOF additional count for generating the min turn time */
+ register_bank(iobase, 4);
+ outb(bofs & 0xff, iobase + IRCC_BOF_COUNT_LO);
+ ctrl = inb(iobase + IRCC_CONTROL) & 0xf0;
+ outb(ctrl | ((bofs >> 8) & 0x0f), iobase + IRCC_BOF_COUNT_HI);
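+	/* The BOF count is 12 bits wide: a low byte plus the low nibble of
+	 * IRCC_BOF_COUNT_HI, hence the 4095 clamp in the caller. */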
+
+ /* Set max Tx frame size */
+ outb(self->tx_buff.len >> 8, iobase + IRCC_TX_SIZE_HI);
+ outb(self->tx_buff.len & 0xff, iobase + IRCC_TX_SIZE_LO);
+
+ /*outb(UART_MCR_OUT2, self->io.sir_base + UART_MCR);*/
+
+ /* Enable burst mode chip Tx DMA */
+ register_bank(iobase, 1);
+ outb(inb(iobase + IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE |
+ IRCC_CFGB_DMA_BURST, iobase + IRCC_SCE_CFGB);
+
+ /* Setup DMA controller (must be done after enabling chip DMA) */
+ irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
+ DMA_TX_MODE);
+
+ /* Enable interrupt */
+
+ register_bank(iobase, 0);
+ outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase + IRCC_IER);
+ outb(IRCC_MASTER_INT_EN, iobase + IRCC_MASTER);
+
+ /* Enable transmit */
+ outb(IRCC_LCR_B_SCE_TRANSMIT | IRCC_LCR_B_SIP_ENABLE, iobase + IRCC_LCR_B);
+}
+
+/*
+ * Function smsc_ircc_dma_xmit_complete (self)
+ *
+ *    The transfer of a frame is finished. This function will only be called
+ * by the interrupt handler
+ *
+ */
+static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self)
+{
+ int iobase = self->io.fir_base;
+
+ pr_debug("%s\n", __func__);
+#if 0
+ /* Disable Tx */
+ register_bank(iobase, 0);
+ outb(0x00, iobase + IRCC_LCR_B);
+#endif
+ register_bank(iobase, 1);
+ outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
+ iobase + IRCC_SCE_CFGB);
+
+ /* Check for underrun! */
+ register_bank(iobase, 0);
+ if (inb(iobase + IRCC_LSR) & IRCC_LSR_UNDERRUN) {
+ self->netdev->stats.tx_errors++;
+ self->netdev->stats.tx_fifo_errors++;
+
+ /* Reset error condition */
+ register_bank(iobase, 0);
+ outb(IRCC_MASTER_ERROR_RESET, iobase + IRCC_MASTER);
+ outb(0x00, iobase + IRCC_MASTER);
+ } else {
+ self->netdev->stats.tx_packets++;
+ self->netdev->stats.tx_bytes += self->tx_buff.len;
+ }
+
+ /* Check if it's time to change the speed */
+ if (self->new_speed) {
+ smsc_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ }
+
+ netif_wake_queue(self->netdev);
+}
+
+/*
+ * Function smsc_ircc_dma_receive(self)
+ *
+ * Get ready for receiving a frame. The device will initiate a DMA
+ * if it starts to receive a frame.
+ *
+ */
+static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self)
+{
+ int iobase = self->io.fir_base;
+#if 0
+ /* Turn off chip DMA */
+ register_bank(iobase, 1);
+ outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
+ iobase + IRCC_SCE_CFGB);
+#endif
+
+ /* Disable Tx */
+ register_bank(iobase, 0);
+ outb(0x00, iobase + IRCC_LCR_B);
+
+ /* Turn off chip DMA */
+ register_bank(iobase, 1);
+ outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
+ iobase + IRCC_SCE_CFGB);
+
+ self->io.direction = IO_RECV;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Set max Rx frame size */
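+	/* 2050 is presumably the 2048-byte IrLAP maximum payload plus a
+	 * 16-bit CRC; it matches the length sanity check on receive. */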
+ register_bank(iobase, 4);
+ outb((2050 >> 8) & 0x0f, iobase + IRCC_RX_SIZE_HI);
+ outb(2050 & 0xff, iobase + IRCC_RX_SIZE_LO);
+
+ /* Setup DMA controller */
+ irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
+ DMA_RX_MODE);
+
+ /* Enable burst mode chip Rx DMA */
+ register_bank(iobase, 1);
+ outb(inb(iobase + IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE |
+ IRCC_CFGB_DMA_BURST, iobase + IRCC_SCE_CFGB);
+
+ /* Enable interrupt */
+ register_bank(iobase, 0);
+ outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase + IRCC_IER);
+ outb(IRCC_MASTER_INT_EN, iobase + IRCC_MASTER);
+
+ /* Enable receiver */
+ register_bank(iobase, 0);
+ outb(IRCC_LCR_B_SCE_RECEIVE | IRCC_LCR_B_SIP_ENABLE,
+ iobase + IRCC_LCR_B);
+
+ return 0;
+}
+
+/*
+ * Function smsc_ircc_dma_receive_complete(self)
+ *
+ * Finished with receiving frames
+ *
+ */
+static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
+{
+ struct sk_buff *skb;
+ int len, msgcnt, lsr;
+ int iobase = self->io.fir_base;
+
+ register_bank(iobase, 0);
+
+ pr_debug("%s\n", __func__);
+#if 0
+ /* Disable Rx */
+ register_bank(iobase, 0);
+ outb(0x00, iobase + IRCC_LCR_B);
+#endif
+ register_bank(iobase, 0);
+ outb(inb(iobase + IRCC_LSAR) & ~IRCC_LSAR_ADDRESS_MASK, iobase + IRCC_LSAR);
+	lsr = inb(iobase + IRCC_LSR);
+ msgcnt = inb(iobase + IRCC_LCR_B) & 0x08;
+
+ pr_debug("%s: dma count = %d\n", __func__,
+ get_dma_residue(self->io.dma));
+
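+	/* The DMA residue is the number of bytes not yet transferred, so the
+	 * difference from the buffer size is what was actually received. */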
+ len = self->rx_buff.truesize - get_dma_residue(self->io.dma);
+
+ /* Look for errors */
+ if (lsr & (IRCC_LSR_FRAME_ERROR | IRCC_LSR_CRC_ERROR | IRCC_LSR_SIZE_ERROR)) {
+ self->netdev->stats.rx_errors++;
+ if (lsr & IRCC_LSR_FRAME_ERROR)
+ self->netdev->stats.rx_frame_errors++;
+ if (lsr & IRCC_LSR_CRC_ERROR)
+ self->netdev->stats.rx_crc_errors++;
+ if (lsr & IRCC_LSR_SIZE_ERROR)
+ self->netdev->stats.rx_length_errors++;
+ if (lsr & (IRCC_LSR_UNDERRUN | IRCC_LSR_OVERRUN))
+ self->netdev->stats.rx_length_errors++;
+ return;
+ }
+
+ /* Remove CRC */
+ len -= self->io.speed < 4000000 ? 2 : 4;
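+	/* 16-bit CRC at MIR speeds below 4 Mb/s, 32-bit CRC at 4 Mb/s (4PPM) */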
+
+ if (len < 2 || len > 2050) {
+ net_warn_ratelimited("%s(), bogus len=%d\n", __func__, len);
+ return;
+ }
+ pr_debug("%s: msgcnt = %d, len=%d\n", __func__, msgcnt, len);
+
+ skb = dev_alloc_skb(len + 1);
+ if (!skb)
+ return;
+
+ /* Make sure IP header gets aligned */
+ skb_reserve(skb, 1);
+
+ skb_put_data(skb, self->rx_buff.data, len);
+ self->netdev->stats.rx_packets++;
+ self->netdev->stats.rx_bytes += len;
+
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+}
+
+/*
+ * Function smsc_ircc_sir_receive (self)
+ *
+ * Receive one frame from the infrared port
+ *
+ */
+static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
+{
+ int boguscount = 0;
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ iobase = self->io.sir_base;
+
+ /*
+ * Receive all characters in Rx FIFO, unwrap and unstuff them.
+ * async_unwrap_char will deliver all found frames
+ */
+ do {
+ async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
+ inb(iobase + UART_RX));
+
+		/* Make sure we don't stay here too long */
+ if (boguscount++ > 32) {
+ pr_debug("%s(), breaking!\n", __func__);
+ break;
+ }
+ } while (inb(iobase + UART_LSR) & UART_LSR_DR);
+}
+
+
+/*
+ * Function smsc_ircc_interrupt (irq, dev_id, regs)
+ *
+ * An interrupt from the chip has arrived. Time to do some work
+ *
+ */
+static irqreturn_t smsc_ircc_interrupt(int dummy, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct smsc_ircc_cb *self = netdev_priv(dev);
+ int iobase, iir, lcra, lsr;
+ irqreturn_t ret = IRQ_NONE;
+
+ /* Serialise the interrupt handler in various CPUs, stop Tx path */
+ spin_lock(&self->lock);
+
+ /* Check if we should use the SIR interrupt handler */
+ if (self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED) {
+ ret = smsc_ircc_interrupt_sir(dev);
+ goto irq_ret_unlock;
+ }
+
+ iobase = self->io.fir_base;
+
+ register_bank(iobase, 0);
+ iir = inb(iobase + IRCC_IIR);
+ if (iir == 0)
+ goto irq_ret_unlock;
+ ret = IRQ_HANDLED;
+
+ /* Disable interrupts */
+ outb(0, iobase + IRCC_IER);
+ lcra = inb(iobase + IRCC_LCR_A);
+ lsr = inb(iobase + IRCC_LSR);
+
+ pr_debug("%s(), iir = 0x%02x\n", __func__, iir);
+
+ if (iir & IRCC_IIR_EOM) {
+ if (self->io.direction == IO_RECV)
+ smsc_ircc_dma_receive_complete(self);
+ else
+ smsc_ircc_dma_xmit_complete(self);
+
+ smsc_ircc_dma_receive(self);
+ }
+
+ if (iir & IRCC_IIR_ACTIVE_FRAME) {
+ /*printk(KERN_WARNING "%s(): Active Frame\n", __func__);*/
+ }
+
+ /* Enable interrupts again */
+
+ register_bank(iobase, 0);
+ outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase + IRCC_IER);
+
+ irq_ret_unlock:
+ spin_unlock(&self->lock);
+
+ return ret;
+}
+
+/*
+ * Function smsc_ircc_interrupt_sir (dev)
+ *
+ * Interrupt handler for SIR modes
+ */
+static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
+{
+ struct smsc_ircc_cb *self = netdev_priv(dev);
+ int boguscount = 0;
+ int iobase;
+ int iir, lsr;
+
+ /* Already locked coming here in smsc_ircc_interrupt() */
+ /*spin_lock(&self->lock);*/
+
+ iobase = self->io.sir_base;
+
+ iir = inb(iobase + UART_IIR) & UART_IIR_ID;
+ if (iir == 0)
+ return IRQ_NONE;
+ while (iir) {
+ /* Clear interrupt */
+ lsr = inb(iobase + UART_LSR);
+
+ pr_debug("%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
+ __func__, iir, lsr, iobase);
+
+ switch (iir) {
+ case UART_IIR_RLSI:
+ pr_debug("%s(), RLSI\n", __func__);
+ break;
+ case UART_IIR_RDI:
+ /* Receive interrupt */
+ smsc_ircc_sir_receive(self);
+ break;
+ case UART_IIR_THRI:
+ if (lsr & UART_LSR_THRE)
+ /* Transmitter ready for data */
+ smsc_ircc_sir_write_wakeup(self);
+ break;
+ default:
+ pr_debug("%s(), unhandled IIR=%#x\n",
+ __func__, iir);
+ break;
+ }
+
+		/* Make sure we don't stay here too long */
+ if (boguscount++ > 100)
+ break;
+
+ iir = inb(iobase + UART_IIR) & UART_IIR_ID;
+ }
+ /*spin_unlock(&self->lock);*/
+ return IRQ_HANDLED;
+}
+
+
+#if 0 /* unused */
+/*
+ * Function ircc_is_receiving (self)
+ *
+ *    Return TRUE if we are currently receiving a frame
+ *
+ */
+static int ircc_is_receiving(struct smsc_ircc_cb *self)
+{
+ int status = FALSE;
+ /* int iobase; */
+
+ pr_debug("%s\n", __func__);
+
+ IRDA_ASSERT(self != NULL, return FALSE;);
+
+ pr_debug("%s: dma count = %d\n", __func__,
+ get_dma_residue(self->io.dma));
+
+ status = (self->rx_buff.state != OUTSIDE_FRAME);
+
+ return status;
+}
+#endif /* unused */
+
+static int smsc_ircc_request_irq(struct smsc_ircc_cb *self)
+{
+ int error;
+
+ error = request_irq(self->io.irq, smsc_ircc_interrupt, 0,
+ self->netdev->name, self->netdev);
+ if (error)
+ pr_debug("%s(), unable to allocate irq=%d, err=%d\n",
+ __func__, self->io.irq, error);
+
+ return error;
+}
+
+static void smsc_ircc_start_interrupts(struct smsc_ircc_cb *self)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&self->lock, flags);
+
+ self->io.speed = 0;
+ smsc_ircc_change_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
+
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+
+static void smsc_ircc_stop_interrupts(struct smsc_ircc_cb *self)
+{
+ int iobase = self->io.fir_base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&self->lock, flags);
+
+ register_bank(iobase, 0);
+ outb(0, iobase + IRCC_IER);
+ outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
+ outb(0x00, iobase + IRCC_MASTER);
+
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+
+
+/*
+ * Function smsc_ircc_net_open (dev)
+ *
+ * Start the device
+ *
+ */
+static int smsc_ircc_net_open(struct net_device *dev)
+{
+ struct smsc_ircc_cb *self;
+ char hwname[16];
+
+ pr_debug("%s\n", __func__);
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ if (self->io.suspended) {
+ pr_debug("%s(), device is suspended\n", __func__);
+ return -EAGAIN;
+ }
+
+ if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name,
+ (void *) dev)) {
+ pr_debug("%s(), unable to allocate irq=%d\n",
+ __func__, self->io.irq);
+ return -EAGAIN;
+ }
+
+ smsc_ircc_start_interrupts(self);
+
+ /* Give self a hardware name */
+ /* It would be cool to offer the chip revision here - Jean II */
+ sprintf(hwname, "SMSC @ 0x%03x", self->io.fir_base);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open(dev, &self->qos, hwname);
+
+ /*
+ * Always allocate the DMA channel after the IRQ,
+ * and clean up on failure.
+ */
+ if (request_dma(self->io.dma, dev->name)) {
+ smsc_ircc_net_close(dev);
+
+ net_warn_ratelimited("%s(), unable to allocate DMA=%d\n",
+ __func__, self->io.dma);
+ return -EAGAIN;
+ }
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/*
+ * Function smsc_ircc_net_close (dev)
+ *
+ * Stop the device
+ *
+ */
+static int smsc_ircc_net_close(struct net_device *dev)
+{
+ struct smsc_ircc_cb *self;
+
+ pr_debug("%s\n", __func__);
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+
+ smsc_ircc_stop_interrupts(self);
+
+ /* if we are called from smsc_ircc_resume we don't have IRQ reserved */
+ if (!self->io.suspended)
+ free_irq(self->io.irq, dev);
+
+ disable_dma(self->io.dma);
+ free_dma(self->io.dma);
+
+ return 0;
+}
+
+static int smsc_ircc_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct smsc_ircc_cb *self = platform_get_drvdata(dev);
+
+ if (!self->io.suspended) {
+ pr_debug("%s, Suspending\n", driver_name);
+
+ rtnl_lock();
+ if (netif_running(self->netdev)) {
+ netif_device_detach(self->netdev);
+ smsc_ircc_stop_interrupts(self);
+ free_irq(self->io.irq, self->netdev);
+ disable_dma(self->io.dma);
+ }
+ self->io.suspended = 1;
+ rtnl_unlock();
+ }
+
+ return 0;
+}
+
+static int smsc_ircc_resume(struct platform_device *dev)
+{
+ struct smsc_ircc_cb *self = platform_get_drvdata(dev);
+
+ if (self->io.suspended) {
+ pr_debug("%s, Waking up\n", driver_name);
+
+ rtnl_lock();
+ smsc_ircc_init_chip(self);
+ if (netif_running(self->netdev)) {
+ if (smsc_ircc_request_irq(self)) {
+ /*
+ * Don't fail resume process, just kill this
+ * network interface
+ */
+ unregister_netdevice(self->netdev);
+ } else {
+ enable_dma(self->io.dma);
+ smsc_ircc_start_interrupts(self);
+ netif_device_attach(self->netdev);
+ }
+ }
+ self->io.suspended = 0;
+ rtnl_unlock();
+ }
+ return 0;
+}
+
+/*
+ * Function smsc_ircc_close (self)
+ *
+ * Close driver instance
+ *
+ */
+static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
+{
+ pr_debug("%s\n", __func__);
+
+ IRDA_ASSERT(self != NULL, return -1;);
+
+ platform_device_unregister(self->pldev);
+
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ smsc_ircc_stop_interrupts(self);
+
+ /* Release the PORTS that this driver is using */
+ pr_debug("%s(), releasing 0x%03x\n", __func__,
+ self->io.fir_base);
+
+ release_region(self->io.fir_base, self->io.fir_ext);
+
+ pr_debug("%s(), releasing 0x%03x\n", __func__,
+ self->io.sir_base);
+
+ release_region(self->io.sir_base, self->io.sir_ext);
+
+ if (self->tx_buff.head)
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+
+ if (self->rx_buff.head)
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+
+ free_netdev(self->netdev);
+
+ return 0;
+}
+
+static void __exit smsc_ircc_cleanup(void)
+{
+ int i;
+
+ pr_debug("%s\n", __func__);
+
+ for (i = 0; i < 2; i++) {
+ if (dev_self[i])
+ smsc_ircc_close(dev_self[i]);
+ }
+
+ if (pnp_driver_registered)
+ pnp_unregister_driver(&smsc_ircc_pnp_driver);
+
+ platform_driver_unregister(&smsc_ircc_driver);
+}
+
+/*
+ * Start SIR operations
+ *
+ * This function *must* be called with spinlock held, because it may
+ * be called from the irq handler (via smsc_ircc_change_speed()). - Jean II
+ */
+static void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
+{
+ struct net_device *dev;
+ int fir_base, sir_base;
+
+ pr_debug("%s\n", __func__);
+
+ IRDA_ASSERT(self != NULL, return;);
+ dev = self->netdev;
+ IRDA_ASSERT(dev != NULL, return;);
+
+ fir_base = self->io.fir_base;
+ sir_base = self->io.sir_base;
+
+ /* Reset everything */
+ outb(IRCC_MASTER_RESET, fir_base + IRCC_MASTER);
+
+ #if SMSC_IRCC2_C_SIR_STOP
+ /*smsc_ircc_sir_stop(self);*/
+ #endif
+
+ register_bank(fir_base, 1);
+ outb(((inb(fir_base + IRCC_SCE_CFGA) & IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK) | IRCC_CFGA_IRDA_SIR_A), fir_base + IRCC_SCE_CFGA);
+
+ /* Initialize UART */
+ outb(UART_LCR_WLEN8, sir_base + UART_LCR); /* Reset DLAB */
+ outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), sir_base + UART_MCR);
+
+	/* Turn on interrupts */
+	outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, sir_base + UART_IER);
+
+ pr_debug("%s() - exit\n", __func__);
+
+ outb(0x00, fir_base + IRCC_MASTER);
+}
+
+#if SMSC_IRCC2_C_SIR_STOP
+void smsc_ircc_sir_stop(struct smsc_ircc_cb *self)
+{
+ int iobase;
+
+ pr_debug("%s\n", __func__);
+ iobase = self->io.sir_base;
+
+ /* Reset UART */
+ outb(0, iobase + UART_MCR);
+
+ /* Turn off interrupts */
+ outb(0, iobase + UART_IER);
+}
+#endif
+
+/*
+ * Function smsc_ircc_sir_write_wakeup (self)
+ *
+ * Called by the SIR interrupt handler when there's room for more data.
+ * If we have more packets to send, we send them here.
+ *
+ */
+static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
+{
+ int actual = 0;
+ int iobase;
+ int fcr;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ pr_debug("%s\n", __func__);
+
+ iobase = self->io.sir_base;
+
+ /* Finished with frame? */
+ if (self->tx_buff.len > 0) {
+ /* Write data left in transmit buffer */
+ actual = smsc_ircc_sir_write(iobase, self->io.fifo_size,
+ self->tx_buff.data, self->tx_buff.len);
+ self->tx_buff.data += actual;
+ self->tx_buff.len -= actual;
+ } else {
+
+ /*if (self->tx_buff.len ==0) {*/
+
+ /*
+ * Now serial buffer is almost free & we can start
+ * transmission of another packet. But first we must check
+ * if we need to change the speed of the hardware
+ */
+ if (self->new_speed) {
+ pr_debug("%s(), Changing speed to %d.\n",
+ __func__, self->new_speed);
+ smsc_ircc_sir_wait_hw_transmitter_finish(self);
+ smsc_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ } else {
+ /* Tell network layer that we want more frames */
+ netif_wake_queue(self->netdev);
+ }
+ self->netdev->stats.tx_packets++;
+
+ if (self->io.speed <= 115200) {
+ /*
+ * Reset Rx FIFO to make sure that all reflected transmit data
+ * is discarded. This is needed for half duplex operation
+ */
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR;
+ fcr |= self->io.speed < 38400 ?
+ UART_FCR_TRIGGER_1 : UART_FCR_TRIGGER_14;
+
+ outb(fcr, iobase + UART_FCR);
+
+ /* Turn on receive interrupts */
+ outb(UART_IER_RDI, iobase + UART_IER);
+ }
+ }
+}
+
+/*
+ * Function smsc_ircc_sir_write (iobase, fifo_size, buf, len)
+ *
+ * Fill Tx FIFO with transmit data
+ *
+ */
+static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
+{
+ int actual = 0;
+
+ /* Tx FIFO should be empty! */
+ if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) {
+ net_warn_ratelimited("%s(), failed, fifo not empty!\n",
+ __func__);
+ return 0;
+ }
+
+ /* Fill FIFO with current frame */
+ while (fifo_size-- > 0 && actual < len) {
+ /* Transmit next byte */
+ outb(buf[actual], iobase + UART_TX);
+ actual++;
+ }
+ return actual;
+}
+
+/*
+ * Function smsc_ircc_is_receiving (self)
+ *
+ *    Returns true if we are currently receiving data
+ *
+ */
+static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self)
+{
+ return self->rx_buff.state != OUTSIDE_FRAME;
+}
+
+
+/*
+ * Function smsc_ircc_probe_transceiver(self)
+ *
+ *    Tries to find the transceiver in use
+ *
+ */
+static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self)
+{
+ unsigned int i;
+
+ IRDA_ASSERT(self != NULL, return;);
+
+ for (i = 0; smsc_transceivers[i].name != NULL; i++)
+ if (smsc_transceivers[i].probe(self->io.fir_base)) {
+ net_info_ratelimited(" %s transceiver found\n",
+ smsc_transceivers[i].name);
+			self->transceiver = i + 1;
+ return;
+ }
+
+ net_info_ratelimited("No transceiver found. Defaulting to %s\n",
+ smsc_transceivers[SMSC_IRCC2_C_DEFAULT_TRANSCEIVER].name);
+
+ self->transceiver = SMSC_IRCC2_C_DEFAULT_TRANSCEIVER;
+}
+
+
+/*
+ * Function smsc_ircc_set_transceiver_for_speed(self, speed)
+ *
+ * Set the transceiver according to the speed
+ *
+ */
+static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed)
+{
+ unsigned int trx;
+
+ trx = self->transceiver;
+ if (trx > 0)
+ smsc_transceivers[trx - 1].set_for_speed(self->io.fir_base, speed);
+}
+
+/*
+ * Function smsc_ircc_sir_wait_hw_transmitter_finish (self)
+ *
+ * Wait for the real end of HW transmission
+ *
+ * The UART is a strict FIFO, and we get called only when we have finished
+ * pushing data to the FIFO, so the maximum amount of time we must wait
+ * is only for the FIFO to drain out.
+ *
+ * We use a simple calibrated loop. We may need to adjust the loop
+ * delay (udelay) to balance I/O traffic and latency. And we also need to
+ * adjust the maximum timeout.
+ * It would probably be better to wait for the proper interrupt,
+ * but it doesn't seem to be available.
+ *
+ * We can't use jiffies or kernel timers because:
+ *	1) We are called from the interrupt handler, which disables softirqs,
+ *	    so jiffies won't be increased
+ * 2) Jiffies granularity is usually very coarse (10ms), and we don't
+ * want to wait that long to detect stuck hardware.
+ * Jean II
+ */
+
+static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
+{
+ int iobase = self->io.sir_base;
+ int count = SMSC_IRCC2_HW_TRANSMITTER_TIMEOUT_US;
+
+ /* Calibrated busy loop */
+ while (count-- > 0 && !(inb(iobase + UART_LSR) & UART_LSR_TEMT))
+ udelay(1);
+
+ if (count < 0)
+ pr_debug("%s(): stuck transmitter\n", __func__);
+}
+
+
+/* PROBING
+ *
+ * REVISIT we can be told about the device by PNP, and should use that info
+ * instead of probing hardware and creating a platform_device ...
+ */
+
+static int __init smsc_ircc_look_for_chips(void)
+{
+ struct smsc_chip_address *address;
+ char *type;
+ unsigned int cfg_base, found;
+
+ found = 0;
+ address = possible_addresses;
+
+ while (address->cfg_base) {
+ cfg_base = address->cfg_base;
+
+ /*printk(KERN_WARNING "%s(): probing: 0x%02x for: 0x%02x\n", __func__, cfg_base, address->type);*/
+
+ if (address->type & SMSCSIO_TYPE_FDC) {
+ type = "FDC";
+ if (address->type & SMSCSIO_TYPE_FLAT)
+ if (!smsc_superio_flat(fdc_chips_flat, cfg_base, type))
+ found++;
+
+ if (address->type & SMSCSIO_TYPE_PAGED)
+ if (!smsc_superio_paged(fdc_chips_paged, cfg_base, type))
+ found++;
+ }
+ if (address->type & SMSCSIO_TYPE_LPC) {
+ type = "LPC";
+ if (address->type & SMSCSIO_TYPE_FLAT)
+ if (!smsc_superio_flat(lpc_chips_flat, cfg_base, type))
+ found++;
+
+ if (address->type & SMSCSIO_TYPE_PAGED)
+ if (!smsc_superio_paged(lpc_chips_paged, cfg_base, type))
+ found++;
+ }
+ address++;
+ }
+ return found;
+}
+
+/*
+ * Function smsc_superio_flat (chip, base, type)
+ *
+ *    Try to get the configuration of an SMC SuperIO chip with a flat register model
+ *
+ */
+static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfgbase, char *type)
+{
+ unsigned short firbase, sirbase;
+ u8 mode, dma, irq;
+ int ret = -ENODEV;
+
+ pr_debug("%s\n", __func__);
+
+ if (smsc_ircc_probe(cfgbase, SMSCSIOFLAT_DEVICEID_REG, chips, type) == NULL)
+ return ret;
+
+ outb(SMSCSIOFLAT_UARTMODE0C_REG, cfgbase);
+ mode = inb(cfgbase + 1);
+
+ /*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __func__, mode);*/
+
+ if (!(mode & SMSCSIOFLAT_UART2MODE_VAL_IRDA))
+ net_warn_ratelimited("%s(): IrDA not enabled\n", __func__);
+
+ outb(SMSCSIOFLAT_UART2BASEADDR_REG, cfgbase);
+ sirbase = inb(cfgbase + 1) << 2;
+
+ /* FIR iobase */
+ outb(SMSCSIOFLAT_FIRBASEADDR_REG, cfgbase);
+ firbase = inb(cfgbase + 1) << 3;
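+	/* The flat-model registers hold address bits 2-9 of the SIR base and
+	 * bits 3-10 of the FIR base, hence the shifts above. */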
+
+ /* DMA */
+ outb(SMSCSIOFLAT_FIRDMASELECT_REG, cfgbase);
+ dma = inb(cfgbase + 1) & SMSCSIOFLAT_FIRDMASELECT_MASK;
+
+ /* IRQ */
+ outb(SMSCSIOFLAT_UARTIRQSELECT_REG, cfgbase);
+ irq = inb(cfgbase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK;
+
+ net_info_ratelimited("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n",
+ __func__, firbase, sirbase, dma, irq, mode);
+
+ if (firbase && smsc_ircc_open(firbase, sirbase, dma, irq) == 0)
+ ret = 0;
+
+ /* Exit configuration */
+ outb(SMSCSIO_CFGEXITKEY, cfgbase);
+
+ return ret;
+}
+
+/*
+ * Function smsc_superio_paged (chip, base, type)
+ *
+ *    Try to get the configuration of an SMC SuperIO chip with a paged register model
+ *
+ */
+static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type)
+{
+ unsigned short fir_io, sir_io;
+ int ret = -ENODEV;
+
+ pr_debug("%s\n", __func__);
+
+ if (smsc_ircc_probe(cfg_base, 0x20, chips, type) == NULL)
+ return ret;
+
+ /* Select logical device (UART2) */
+ outb(0x07, cfg_base);
+ outb(0x05, cfg_base + 1);
+
+ /* SIR iobase */
+ outb(0x60, cfg_base);
+ sir_io = inb(cfg_base + 1) << 8;
+ outb(0x61, cfg_base);
+ sir_io |= inb(cfg_base + 1);
+
+ /* Read FIR base */
+ outb(0x62, cfg_base);
+ fir_io = inb(cfg_base + 1) << 8;
+ outb(0x63, cfg_base);
+ fir_io |= inb(cfg_base + 1);
+ outb(0x2b, cfg_base); /* ??? */
+
+ if (fir_io && smsc_ircc_open(fir_io, sir_io, ircc_dma, ircc_irq) == 0)
+ ret = 0;
+
+ /* Exit configuration */
+ outb(SMSCSIO_CFGEXITKEY, cfg_base);
+
+ return ret;
+}
+
+
+static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
+{
+ pr_debug("%s\n", __func__);
+
+ outb(reg, cfg_base);
+ return inb(cfg_base) != reg ? -1 : 0;
+}
+
+static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
+{
+ u8 devid, xdevid, rev;
+
+ pr_debug("%s\n", __func__);
+
+ /* Leave configuration */
+
+ outb(SMSCSIO_CFGEXITKEY, cfg_base);
+
+ if (inb(cfg_base) == SMSCSIO_CFGEXITKEY) /* not a smc superio chip */
+ return NULL;
+
+ outb(reg, cfg_base);
+
+ xdevid = inb(cfg_base + 1);
+
+ /* Enter configuration */
+
+ outb(SMSCSIO_CFGACCESSKEY, cfg_base);
+
+ #if 0
+ if (smsc_access(cfg_base,0x55)) /* send second key and check */
+ return NULL;
+ #endif
+
+ /* probe device ID */
+
+ if (smsc_access(cfg_base, reg))
+ return NULL;
+
+ devid = inb(cfg_base + 1);
+
+ if (devid == 0 || devid == 0xff) /* typical values for unused port */
+ return NULL;
+
+ /* probe revision ID */
+
+ if (smsc_access(cfg_base, reg + 1))
+ return NULL;
+
+ rev = inb(cfg_base + 1);
+
+	if (rev >= 128) /* a revision this high makes no sense */
+ return NULL;
+
+ if (devid == xdevid) /* protection against false positives */
+ return NULL;
+
+ /* Check for expected device ID; are there others? */
+
+ while (chip->devid != devid) {
+
+ chip++;
+
+ if (chip->name == NULL)
+ return NULL;
+ }
+
+ net_info_ratelimited("found SMC SuperIO Chip (devid=0x%02x rev=%02X base=0x%04x): %s%s\n",
+ devid, rev, cfg_base, type, chip->name);
+
+ if (chip->rev > rev) {
+ net_info_ratelimited("Revision higher than expected\n");
+ return NULL;
+ }
+
+ if (chip->flags & NoIRDA)
+ net_info_ratelimited("chipset does not support IRDA\n");
+
+ return chip;
+}
+
+static int __init smsc_superio_fdc(unsigned short cfg_base)
+{
+ int ret = -1;
+
+ if (!request_region(cfg_base, 2, driver_name)) {
+ net_warn_ratelimited("%s: can't get cfg_base of 0x%03x\n",
+ __func__, cfg_base);
+ } else {
+ if (!smsc_superio_flat(fdc_chips_flat, cfg_base, "FDC") ||
+ !smsc_superio_paged(fdc_chips_paged, cfg_base, "FDC"))
+ ret = 0;
+
+ release_region(cfg_base, 2);
+ }
+
+ return ret;
+}
+
+static int __init smsc_superio_lpc(unsigned short cfg_base)
+{
+ int ret = -1;
+
+ if (!request_region(cfg_base, 2, driver_name)) {
+ net_warn_ratelimited("%s: can't get cfg_base of 0x%03x\n",
+ __func__, cfg_base);
+ } else {
+ if (!smsc_superio_flat(lpc_chips_flat, cfg_base, "LPC") ||
+ !smsc_superio_paged(lpc_chips_paged, cfg_base, "LPC"))
+ ret = 0;
+
+ release_region(cfg_base, 2);
+ }
+ return ret;
+}
+
+/*
+ * Look for some specific subsystem setups that need
+ * pre-configuration not properly done by the BIOS (especially laptops).
+ * This code is based in part on smcinit.c, tosh1800-smcinit.c
+ * and tosh2450-smcinit.c. The table lists the device entries
+ * for ISA bridges with an LPC (Low Pin Count) controller which
+ * handles the communication with the SMSC device. After the LPC
+ * controller is initialized through PCI, the SMSC device is initialized
+ * through a dedicated port in the ISA port-mapped I/O area; this latter
+ * area is used to configure the SMSC device with default
+ * SIR and FIR I/O ports, DMA and IRQ. Different vendors have
+ * used different sets of parameters and different control port
+ * addresses, making a subsystem device table necessary.
+ */
+#ifdef CONFIG_PCI
+static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = {
+ /*
+ * Subsystems needing entries:
+ * 0x10b9:0x1533 0x103c:0x0850 HP nx9010 family
+ * 0x10b9:0x1533 0x0e11:0x005a Compaq nc4000 family
+ * 0x8086:0x24cc 0x0e11:0x002a HP nx9000 family
+ */
+ {
+ /* Guessed entry */
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
+ .device = 0x24cc,
+ .subvendor = 0x103c,
+ .subdevice = 0x08bc,
+ .sir_io = 0x02f8,
+ .fir_io = 0x0130,
+ .fir_irq = 0x05,
+ .fir_dma = 0x03,
+ .cfg_base = 0x004e,
+ .preconfigure = preconfigure_through_82801,
+ .name = "HP nx5000 family",
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
+ .device = 0x24cc,
+ .subvendor = 0x103c,
+ .subdevice = 0x088c,
+ /* Quite certain these are the same for nc8000 as for nc6000 */
+ .sir_io = 0x02f8,
+ .fir_io = 0x0130,
+ .fir_irq = 0x05,
+ .fir_dma = 0x03,
+ .cfg_base = 0x004e,
+ .preconfigure = preconfigure_through_82801,
+ .name = "HP nc8000 family",
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
+ .device = 0x24cc,
+ .subvendor = 0x103c,
+ .subdevice = 0x0890,
+ .sir_io = 0x02f8,
+ .fir_io = 0x0130,
+ .fir_irq = 0x05,
+ .fir_dma = 0x03,
+ .cfg_base = 0x004e,
+ .preconfigure = preconfigure_through_82801,
+ .name = "HP nc6000 family",
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
+ .device = 0x24cc,
+ .subvendor = 0x0e11,
+ .subdevice = 0x0860,
+ /* I assume these are the same for x1000 as for the others */
+ .sir_io = 0x02e8,
+ .fir_io = 0x02f8,
+ .fir_irq = 0x07,
+ .fir_dma = 0x03,
+ .cfg_base = 0x002e,
+ .preconfigure = preconfigure_through_82801,
+ .name = "Compaq x1000 family",
+ },
+ {
+ /* Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge */
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x24c0,
+ .subvendor = 0x1179,
+ .subdevice = 0xffff, /* 0xffff is "any" */
+ .sir_io = 0x03f8,
+ .fir_io = 0x0130,
+ .fir_irq = 0x07,
+ .fir_dma = 0x01,
+ .cfg_base = 0x002e,
+ .preconfigure = preconfigure_through_82801,
+ .name = "Toshiba laptop with Intel 82801DB/DBL LPC bridge",
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801CAM ISA bridge */
+ .device = 0x248c,
+ .subvendor = 0x1179,
+ .subdevice = 0xffff, /* 0xffff is "any" */
+ .sir_io = 0x03f8,
+ .fir_io = 0x0130,
+ .fir_irq = 0x03,
+ .fir_dma = 0x03,
+ .cfg_base = 0x002e,
+ .preconfigure = preconfigure_through_82801,
+ .name = "Toshiba laptop with Intel 82801CAM ISA bridge",
+ },
+ {
+ /* 82801DBM (ICH4-M) LPC Interface Bridge */
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x24cc,
+ .subvendor = 0x1179,
+ .subdevice = 0xffff, /* 0xffff is "any" */
+ .sir_io = 0x03f8,
+ .fir_io = 0x0130,
+ .fir_irq = 0x03,
+ .fir_dma = 0x03,
+ .cfg_base = 0x002e,
+ .preconfigure = preconfigure_through_82801,
+		.name = "Toshiba laptop with Intel 82801DBM LPC bridge",
+ },
+ {
+ /* ALi M1533/M1535 PCI to ISA Bridge [Aladdin IV/V/V+] */
+ .vendor = PCI_VENDOR_ID_AL,
+ .device = 0x1533,
+ .subvendor = 0x1179,
+ .subdevice = 0xffff, /* 0xffff is "any" */
+ .sir_io = 0x02e8,
+ .fir_io = 0x02f8,
+ .fir_irq = 0x07,
+ .fir_dma = 0x03,
+ .cfg_base = 0x002e,
+ .preconfigure = preconfigure_through_ali,
+ .name = "Toshiba laptop with ALi ISA bridge",
+ },
+ { } // Terminator
+};
+
+
+/*
+ * This sets up the basic SMSC parameters
+ * (FIR port, SIR port, FIR DMA, FIR IRQ)
+ * through the chip configuration port.
+ */
+static int __init
+preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf)
+{
+ unsigned short iobase = conf->cfg_base;
+ unsigned char tmpbyte;
+
+ outb(LPC47N227_CFGACCESSKEY, iobase); // enter configuration state
+ outb(SMSCSIOFLAT_DEVICEID_REG, iobase); // set for device ID
+	tmpbyte = inb(iobase + 1); // Read device ID
+ pr_debug("Detected Chip id: 0x%02x, setting up registers...\n",
+ tmpbyte);
+
+ /* Disable UART1 and set up SIR I/O port */
+ outb(0x24, iobase); // select CR24 - UART1 base addr
+ outb(0x00, iobase + 1); // disable UART1
+ outb(SMSCSIOFLAT_UART2BASEADDR_REG, iobase); // select CR25 - UART2 base addr
+	outb((conf->sir_io >> 2), iobase + 1); // bits 2-9 of 0x3f8
+	tmpbyte = inb(iobase + 1);
+	if (tmpbyte != (conf->sir_io >> 2)) {
+ net_warn_ratelimited("ERROR: could not configure SIR ioport\n");
+ net_warn_ratelimited("Try to supply ircc_cfg argument\n");
+ return -ENXIO;
+ }
+
+ /* Set up FIR IRQ channel for UART2 */
+ outb(SMSCSIOFLAT_UARTIRQSELECT_REG, iobase); // select CR28 - UART1,2 IRQ select
+ tmpbyte = inb(iobase + 1);
+ tmpbyte &= SMSCSIOFLAT_UART1IRQSELECT_MASK; // Do not touch the UART1 portion
+ tmpbyte |= (conf->fir_irq & SMSCSIOFLAT_UART2IRQSELECT_MASK);
+ outb(tmpbyte, iobase + 1);
+ tmpbyte = inb(iobase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK;
+ if (tmpbyte != conf->fir_irq) {
+ net_warn_ratelimited("ERROR: could not configure FIR IRQ channel\n");
+ return -ENXIO;
+ }
+
+ /* Set up FIR I/O port */
+ outb(SMSCSIOFLAT_FIRBASEADDR_REG, iobase); // CR2B - SCE (FIR) base addr
+ outb((conf->fir_io >> 3), iobase + 1);
+ tmpbyte = inb(iobase + 1);
+	if (tmpbyte != (conf->fir_io >> 3)) {
+ net_warn_ratelimited("ERROR: could not configure FIR I/O port\n");
+ return -ENXIO;
+ }
+
+ /* Set up FIR DMA channel */
+ outb(SMSCSIOFLAT_FIRDMASELECT_REG, iobase); // CR2C - SCE (FIR) DMA select
+ outb((conf->fir_dma & LPC47N227_FIRDMASELECT_MASK), iobase + 1); // DMA
+ tmpbyte = inb(iobase + 1) & LPC47N227_FIRDMASELECT_MASK;
+ if (tmpbyte != (conf->fir_dma & LPC47N227_FIRDMASELECT_MASK)) {
+ net_warn_ratelimited("ERROR: could not configure FIR DMA channel\n");
+ return -ENXIO;
+ }
+
+ outb(SMSCSIOFLAT_UARTMODE0C_REG, iobase); // CR0C - UART mode
+ tmpbyte = inb(iobase + 1);
+	tmpbyte = (tmpbyte & ~SMSCSIOFLAT_UART2MODE_MASK) |
+		  SMSCSIOFLAT_UART2MODE_VAL_IRDA;
+ outb(tmpbyte, iobase + 1); // enable IrDA (HPSIR) mode, high speed
+
+ outb(LPC47N227_APMBOOTDRIVE_REG, iobase); // CR07 - Auto Pwr Mgt/boot drive sel
+ tmpbyte = inb(iobase + 1);
+ outb(tmpbyte | LPC47N227_UART2AUTOPWRDOWN_MASK, iobase + 1); // enable UART2 autopower down
+
+ /* This one was not part of tosh1800 */
+ outb(0x0a, iobase); // CR0a - ecp fifo / ir mux
+ tmpbyte = inb(iobase + 1);
+ outb(tmpbyte | 0x40, iobase + 1); // send active device to ir port
+
+ outb(LPC47N227_UART12POWER_REG, iobase); // CR02 - UART 1,2 power
+ tmpbyte = inb(iobase + 1);
+ outb(tmpbyte | LPC47N227_UART2POWERDOWN_MASK, iobase + 1); // UART2 power up mode, UART1 power down
+
+ outb(LPC47N227_FDCPOWERVALIDCONF_REG, iobase); // CR00 - FDC Power/valid config cycle
+ tmpbyte = inb(iobase + 1);
+ outb(tmpbyte | LPC47N227_VALID_MASK, iobase + 1); // valid config cycle done
+
+ outb(LPC47N227_CFGEXITKEY, iobase); // Exit configuration
+
+ return 0;
+}
+
+/* 82801CAM generic registers */
+#define VID 0x00
+#define DID 0x02
+#define PIRQ_A_D_ROUT 0x60
+#define SIRQ_CNTL 0x64
+#define PIRQ_E_H_ROUT 0x68
+#define PCI_DMA_C 0x90
+/* LPC-specific registers */
+#define COM_DEC 0xe0
+#define GEN1_DEC 0xe4
+#define LPC_EN 0xe6
+#define GEN2_DEC 0xec
+/*
+ * Sets up the I/O range using the 82801CAM ISA bridge, 82801DBM LPC bridge
+ * or Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge.
+ * They all work the same way!
+ */
+static int __init preconfigure_through_82801(struct pci_dev *dev,
+			struct smsc_ircc_subsystem_configuration *conf)
+{
+ unsigned short tmpword;
+ unsigned char tmpbyte;
+
+ net_info_ratelimited("Setting up Intel 82801 controller and SMSC device\n");
+ /*
+ * Select the range for the COMA COM port (SIR)
+ * Register COM_DEC:
+ * Bit 7: reserved
+ * Bit 6-4, COMB decode range
+ * Bit 3: reserved
+ * Bit 2-0, COMA decode range
+ *
+ * Decode ranges:
+ * 000 = 0x3f8-0x3ff (COM1)
+ * 001 = 0x2f8-0x2ff (COM2)
+ * 010 = 0x220-0x227
+ * 011 = 0x228-0x22f
+ * 100 = 0x238-0x23f
+ * 101 = 0x2e8-0x2ef (COM4)
+ * 110 = 0x338-0x33f
+ * 111 = 0x3e8-0x3ef (COM3)
+ */
+ pci_read_config_byte(dev, COM_DEC, &tmpbyte);
+ tmpbyte &= 0xf8; /* mask COMA bits */
+	switch (conf->sir_io) {
+ case 0x3f8:
+ tmpbyte |= 0x00;
+ break;
+ case 0x2f8:
+ tmpbyte |= 0x01;
+ break;
+ case 0x220:
+ tmpbyte |= 0x02;
+ break;
+ case 0x228:
+ tmpbyte |= 0x03;
+ break;
+ case 0x238:
+ tmpbyte |= 0x04;
+ break;
+ case 0x2e8:
+ tmpbyte |= 0x05;
+ break;
+ case 0x338:
+ tmpbyte |= 0x06;
+ break;
+ case 0x3e8:
+ tmpbyte |= 0x07;
+ break;
+ default:
+ tmpbyte |= 0x01; /* COM2 default */
+ }
+ pr_debug("COM_DEC (write): 0x%02x\n", tmpbyte);
+ pci_write_config_byte(dev, COM_DEC, tmpbyte);
+
+ /* Enable Low Pin Count interface */
+ pci_read_config_word(dev, LPC_EN, &tmpword);
+ /* These seem to be set up at all times,
+ * just make sure it is properly set.
+ */
+	switch (conf->cfg_base) {
+ case 0x04e:
+ tmpword |= 0x2000;
+ break;
+ case 0x02e:
+ tmpword |= 0x1000;
+ break;
+ case 0x062:
+ tmpword |= 0x0800;
+ break;
+ case 0x060:
+ tmpword |= 0x0400;
+ break;
+ default:
+ net_warn_ratelimited("Uncommon I/O base address: 0x%04x\n",
+ conf->cfg_base);
+ break;
+ }
+ tmpword &= 0xfffd; /* disable LPC COMB */
+ tmpword |= 0x0001; /* set bit 0 : enable LPC COMA addr range (GEN2) */
+ pr_debug("LPC_EN (write): 0x%04x\n", tmpword);
+ pci_write_config_word(dev, LPC_EN, tmpword);
+
+ /*
+ * Configure LPC DMA channel
+ * PCI_DMA_C bits:
+ * Bit 15-14: DMA channel 7 select
+ * Bit 13-12: DMA channel 6 select
+ * Bit 11-10: DMA channel 5 select
+ * Bit 9-8: Reserved
+ * Bit 7-6: DMA channel 3 select
+ * Bit 5-4: DMA channel 2 select
+ * Bit 3-2: DMA channel 1 select
+ * Bit 1-0: DMA channel 0 select
+ * 00 = Reserved value
+ * 01 = PC/PCI DMA
+ * 10 = Reserved value
+ * 11 = LPC I/F DMA
+ */
+ pci_read_config_word(dev, PCI_DMA_C, &tmpword);
+	switch (conf->fir_dma) {
+ case 0x07:
+ tmpword |= 0xc000;
+ break;
+ case 0x06:
+ tmpword |= 0x3000;
+ break;
+ case 0x05:
+ tmpword |= 0x0c00;
+ break;
+ case 0x03:
+ tmpword |= 0x00c0;
+ break;
+ case 0x02:
+ tmpword |= 0x0030;
+ break;
+ case 0x01:
+ tmpword |= 0x000c;
+ break;
+ case 0x00:
+ tmpword |= 0x0003;
+ break;
+ default:
+ break; /* do not change settings */
+ }
+ pr_debug("PCI_DMA_C (write): 0x%04x\n", tmpword);
+ pci_write_config_word(dev, PCI_DMA_C, tmpword);
+
+ /*
+ * GEN2_DEC bits:
+ * Bit 15-4: Generic I/O range
+ * Bit 3-1: reserved (read as 0)
+ * Bit 0: enable GEN2 range on LPC I/F
+ */
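+	/* Mask the FIR base down to an 8-byte-aligned range; the FIR block
+	 * decodes an 8-register window (SMSC_IRCC2_FIR_CHIP_IO_EXTENT). */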
+ tmpword = conf->fir_io & 0xfff8;
+ tmpword |= 0x0001;
+ pr_debug("GEN2_DEC (write): 0x%04x\n", tmpword);
+ pci_write_config_word(dev, GEN2_DEC, tmpword);
+
+ /* Pre-configure chip */
+ return preconfigure_smsc_chip(conf);
+}
+
+/*
+ * Pre-configure a certain port on the ALi 1533 bridge.
+ * This is based on reverse-engineering since ALi does not
+ * provide any data sheet for the 1533 chip.
+ */
+static void __init preconfigure_ali_port(struct pci_dev *dev,
+ unsigned short port)
+{
+ unsigned char reg;
+ /* These bits obviously control the different ports */
+ unsigned char mask;
+ unsigned char tmpbyte;
+
+	switch (port) {
+ case 0x0130:
+ case 0x0178:
+ reg = 0xb0;
+ mask = 0x80;
+ break;
+ case 0x03f8:
+ reg = 0xb4;
+ mask = 0x80;
+ break;
+ case 0x02f8:
+ reg = 0xb4;
+ mask = 0x30;
+ break;
+ case 0x02e8:
+ reg = 0xb4;
+ mask = 0x08;
+ break;
+ default:
+ net_err_ratelimited("Failed to configure unsupported port on ALi 1533 bridge: 0x%04x\n",
+ port);
+ return;
+ }
+
+ pci_read_config_byte(dev, reg, &tmpbyte);
+ /* Turn on the right bits */
+ tmpbyte |= mask;
+ pci_write_config_byte(dev, reg, tmpbyte);
+ net_info_ratelimited("Activated ALi 1533 ISA bridge port 0x%04x\n",
+ port);
+}
+
+static int __init preconfigure_through_ali(struct pci_dev *dev,
+			struct smsc_ircc_subsystem_configuration *conf)
+{
+ /* Configure the two ports on the ALi 1533 */
+ preconfigure_ali_port(dev, conf->sir_io);
+ preconfigure_ali_port(dev, conf->fir_io);
+
+ /* Pre-configure chip */
+ return preconfigure_smsc_chip(conf);
+}
+
+static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
+ unsigned short ircc_fir,
+ unsigned short ircc_sir,
+ unsigned char ircc_dma,
+ unsigned char ircc_irq)
+{
+ struct pci_dev *dev = NULL;
+ unsigned short ss_vendor = 0x0000;
+ unsigned short ss_device = 0x0000;
+ int ret = 0;
+
+ for_each_pci_dev(dev) {
+ struct smsc_ircc_subsystem_configuration *conf;
+
+ /*
+ * Cache the subsystem vendor/device:
+ * some manufacturers fail to set this for all components,
+ * so we save it in case there is just 0x0000 0x0000 on the
+ * device we want to check.
+ */
+ if (dev->subsystem_vendor != 0x0000U) {
+ ss_vendor = dev->subsystem_vendor;
+ ss_device = dev->subsystem_device;
+ }
+ conf = subsystem_configurations;
+		for ( ; conf->subvendor; conf++) {
+			if (conf->vendor == dev->vendor &&
+ conf->device == dev->device &&
+ conf->subvendor == ss_vendor &&
+ /* Sometimes these are cached values */
+ (conf->subdevice == ss_device ||
+ conf->subdevice == 0xffff)) {
+ struct smsc_ircc_subsystem_configuration
+ tmpconf;
+
+ memcpy(&tmpconf, conf,
+ sizeof(struct smsc_ircc_subsystem_configuration));
+
+ /*
+ * Override the default values with anything
+ * passed in as parameter
+ */
+ if (ircc_cfg != 0)
+ tmpconf.cfg_base = ircc_cfg;
+ if (ircc_fir != 0)
+ tmpconf.fir_io = ircc_fir;
+ if (ircc_sir != 0)
+ tmpconf.sir_io = ircc_sir;
+ if (ircc_dma != DMA_INVAL)
+ tmpconf.fir_dma = ircc_dma;
+ if (ircc_irq != IRQ_INVAL)
+ tmpconf.fir_irq = ircc_irq;
+
+ net_info_ratelimited("Detected unconfigured %s SMSC IrDA chip, pre-configuring device\n",
+ conf->name);
+ if (conf->preconfigure)
+ ret = conf->preconfigure(dev, &tmpconf);
+ else
+ ret = -ENODEV;
+ }
+ }
+ }
+
+ return ret;
+}
+#endif // CONFIG_PCI
+
+/************************************************
+ *
+ * Transceivers specific functions
+ *
+ ************************************************/
+
+
+/*
+ * Function smsc_ircc_set_transceiver_smsc_ircc_atc(fir_base, speed)
+ *
+ * Program transceiver through smsc-ircc ATC circuitry
+ *
+ */
+
+static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed)
+{
+ unsigned long jiffies_now, jiffies_timeout;
+ u8 val;
+
+ jiffies_now = jiffies;
+ jiffies_timeout = jiffies + SMSC_IRCC2_ATC_PROGRAMMING_TIMEOUT_JIFFIES;
+
+ /* ATC */
+ register_bank(fir_base, 4);
+ outb((inb(fir_base + IRCC_ATC) & IRCC_ATC_MASK) | IRCC_ATC_nPROGREADY|IRCC_ATC_ENABLE,
+ fir_base + IRCC_ATC);
+
+ while ((val = (inb(fir_base + IRCC_ATC) & IRCC_ATC_nPROGREADY)) &&
+ !time_after(jiffies, jiffies_timeout))
+ /* empty */;
+
+ if (val)
+ net_warn_ratelimited("%s(): ATC: 0x%02x\n",
+ __func__, inb(fir_base + IRCC_ATC));
+}
+
+/*
+ * Function smsc_ircc_probe_transceiver_smsc_ircc_atc(fir_base)
+ *
+ * Probe transceiver smsc-ircc ATC circuitry
+ *
+ */
+
+static int smsc_ircc_probe_transceiver_smsc_ircc_atc(int fir_base)
+{
+ return 0;
+}
+
+/*
+ * Function smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(self, speed)
+ *
+ * Set transceiver
+ *
+ */
+
+static void smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(int fir_base, u32 speed)
+{
+ u8 fast_mode;
+
+ switch (speed) {
+ default:
+ case 576000 :
+ fast_mode = 0;
+ break;
+ case 1152000 :
+ case 4000000 :
+ fast_mode = IRCC_LCR_A_FAST;
+ break;
+ }
+ register_bank(fir_base, 0);
+ outb((inb(fir_base + IRCC_LCR_A) & 0xbf) | fast_mode, fir_base + IRCC_LCR_A);
+}
+
+/*
+ * Function smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(fir_base)
+ *
+ * Probe transceiver
+ *
+ */
+
+static int smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(int fir_base)
+{
+ return 0;
+}
+
+/*
+ * Function smsc_ircc_set_transceiver_toshiba_sat1800(fir_base, speed)
+ *
+ * Set transceiver
+ *
+ */
+
+static void smsc_ircc_set_transceiver_toshiba_sat1800(int fir_base, u32 speed)
+{
+ u8 fast_mode;
+
+ switch (speed) {
+ default:
+ case 576000 :
+ fast_mode = 0;
+ break;
+ case 1152000 :
+ case 4000000 :
+ fast_mode = /*IRCC_LCR_A_FAST |*/ IRCC_LCR_A_GP_DATA;
+ break;
+
+ }
+ /* This causes an interrupt */
+ register_bank(fir_base, 0);
+ outb((inb(fir_base + IRCC_LCR_A) & 0xbf) | fast_mode, fir_base + IRCC_LCR_A);
+}
+
+/*
+ * Function smsc_ircc_probe_transceiver_toshiba_sat1800(fir_base)
+ *
+ * Probe transceiver
+ *
+ */
+
+static int smsc_ircc_probe_transceiver_toshiba_sat1800(int fir_base)
+{
+ return 0;
+}
+
+
+module_init(smsc_ircc_init);
+module_exit(smsc_ircc_cleanup);
diff --git a/drivers/staging/irda/drivers/smsc-ircc2.h b/drivers/staging/irda/drivers/smsc-ircc2.h
new file mode 100644
index 000000000000..4829fa22cb29
--- /dev/null
+++ b/drivers/staging/irda/drivers/smsc-ircc2.h
@@ -0,0 +1,191 @@
+/*********************************************************************
+ *
+ * Description: Definitions for the SMC IrCC chipset
+ * Status: Experimental.
+ * Author: Daniele Peri (peri@csai.unipa.it)
+ *
+ * Copyright (c) 2002 Daniele Peri
+ * All Rights Reserved.
+ *
+ * Based on smc-ircc.h:
+ *
+ * Copyright (c) 1999-2000, Dag Brattli <dagb@cs.uit.no>
+ * Copyright (c) 1998-1999, Thomas Davis (tadavis@jps.net>
+ * All Rights Reserved
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#ifndef SMSC_IRCC2_H
+#define SMSC_IRCC2_H
+
+/* DMA modes needed */
+#define DMA_TX_MODE 0x08 /* Mem to I/O, ++, demand. */
+#define DMA_RX_MODE 0x04 /* I/O to mem, ++, demand. */
+
+/* Master Control Register */
+#define IRCC_MASTER 0x07
+#define IRCC_MASTER_POWERDOWN 0x80
+#define IRCC_MASTER_RESET 0x40
+#define IRCC_MASTER_INT_EN 0x20
+#define IRCC_MASTER_ERROR_RESET 0x10
+
+/* Register block 0 */
+
+/* Interrupt Identification */
+#define IRCC_IIR 0x01
+#define IRCC_IIR_ACTIVE_FRAME 0x80
+#define IRCC_IIR_EOM 0x40
+#define IRCC_IIR_RAW_MODE 0x20
+#define IRCC_IIR_FIFO 0x10
+
+/* Interrupt Enable */
+#define IRCC_IER 0x02
+#define IRCC_IER_ACTIVE_FRAME 0x80
+#define IRCC_IER_EOM 0x40
+#define IRCC_IER_RAW_MODE 0x20
+#define IRCC_IER_FIFO 0x10
+
+/* Line Status Register */
+#define IRCC_LSR 0x03
+#define IRCC_LSR_UNDERRUN 0x80
+#define IRCC_LSR_OVERRUN 0x40
+#define IRCC_LSR_FRAME_ERROR 0x20
+#define IRCC_LSR_SIZE_ERROR 0x10
+#define IRCC_LSR_CRC_ERROR 0x80
+#define IRCC_LSR_FRAME_ABORT 0x40
+
+/* Line Status Address Register */
+#define IRCC_LSAR 0x03
+#define IRCC_LSAR_ADDRESS_MASK 0x07
+
+/* Line Control Register A */
+#define IRCC_LCR_A 0x04
+#define IRCC_LCR_A_FIFO_RESET 0x80
+#define IRCC_LCR_A_FAST 0x40
+#define IRCC_LCR_A_GP_DATA 0x20
+#define IRCC_LCR_A_RAW_TX 0x10
+#define IRCC_LCR_A_RAW_RX 0x08
+#define IRCC_LCR_A_ABORT 0x04
+#define IRCC_LCR_A_DATA_DONE 0x02
+
+/* Line Control Register B */
+#define IRCC_LCR_B 0x05
+#define IRCC_LCR_B_SCE_DISABLED 0x00
+#define IRCC_LCR_B_SCE_TRANSMIT 0x40
+#define IRCC_LCR_B_SCE_RECEIVE 0x80
+#define IRCC_LCR_B_SCE_UNDEFINED 0xc0
+#define IRCC_LCR_B_SIP_ENABLE 0x20
+#define IRCC_LCR_B_BRICK_WALL 0x10
+
+/* Bus Status Register */
+#define IRCC_BSR 0x06
+#define IRCC_BSR_NOT_EMPTY 0x80
+#define IRCC_BSR_FIFO_FULL 0x40
+#define IRCC_BSR_TIMEOUT 0x20
+
+/* Register block 1 */
+
+#define IRCC_FIFO_THRESHOLD 0x02
+
+#define IRCC_SCE_CFGA 0x00
+#define IRCC_CFGA_AUX_IR 0x80
+#define IRCC_CFGA_HALF_DUPLEX 0x04
+#define IRCC_CFGA_TX_POLARITY 0x02
+#define IRCC_CFGA_RX_POLARITY 0x01
+
+#define IRCC_CFGA_COM 0x00
+#define IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK 0x87
+#define IRCC_CFGA_IRDA_SIR_A 0x08
+#define IRCC_CFGA_ASK_SIR 0x10
+#define IRCC_CFGA_IRDA_SIR_B 0x18
+#define IRCC_CFGA_IRDA_HDLC 0x20
+#define IRCC_CFGA_IRDA_4PPM 0x28
+#define IRCC_CFGA_CONSUMER 0x30
+#define IRCC_CFGA_RAW_IR 0x38
+#define IRCC_CFGA_OTHER 0x40
+
+#define IRCC_IR_HDLC 0x04
+#define IRCC_IR_4PPM 0x01
+#define IRCC_IR_CONSUMER 0x02
+
+#define IRCC_SCE_CFGB 0x01
+#define IRCC_CFGB_LOOPBACK 0x20
+#define IRCC_CFGB_LPBCK_TX_CRC 0x10
+#define IRCC_CFGB_NOWAIT 0x08
+#define IRCC_CFGB_STRING_MOVE 0x04
+#define IRCC_CFGB_DMA_BURST 0x02
+#define IRCC_CFGB_DMA_ENABLE 0x01
+
+#define IRCC_CFGB_MUX_COM 0x00
+#define IRCC_CFGB_MUX_IR 0x40
+#define IRCC_CFGB_MUX_AUX 0x80
+#define IRCC_CFGB_MUX_INACTIVE 0xc0
+
+/* Register block 3 - Identification Registers! */
+#define IRCC_ID_HIGH 0x00 /* 0x10 */
+#define IRCC_ID_LOW 0x01 /* 0xB8 */
+#define IRCC_CHIP_ID 0x02 /* 0xF1 */
+#define IRCC_VERSION 0x03 /* 0x01 */
+#define IRCC_INTERFACE 0x04 /* low 4 = DMA, high 4 = IRQ */
+#define IRCC_INTERFACE_DMA_MASK 0x0F /* low 4 = DMA, high 4 = IRQ */
+#define IRCC_INTERFACE_IRQ_MASK 0xF0 /* low 4 = DMA, high 4 = IRQ */
+
+/* Register block 4 - IrDA */
+#define IRCC_CONTROL 0x00
+#define IRCC_BOF_COUNT_LO 0x01 /* Low byte */
+#define IRCC_BOF_COUNT_HI 0x00 /* High nibble (bit 0-3) */
+#define IRCC_BRICKWALL_CNT_LO 0x02 /* Low byte */
+#define IRCC_BRICKWALL_CNT_HI 0x03 /* High nibble (bit 4-7) */
+#define IRCC_TX_SIZE_LO 0x04 /* Low byte */
+#define IRCC_TX_SIZE_HI 0x03 /* High nibble (bit 0-3) */
+#define IRCC_RX_SIZE_HI 0x05 /* High nibble (bit 0-3) */
+#define IRCC_RX_SIZE_LO 0x06 /* Low byte */
+
+#define IRCC_1152 0x80
+#define IRCC_CRC 0x40
+
+/* Register block 5 - IrDA */
+#define IRCC_ATC 0x00
+#define IRCC_ATC_nPROGREADY 0x80
+#define IRCC_ATC_SPEED 0x40
+#define IRCC_ATC_ENABLE 0x20
+#define IRCC_ATC_MASK 0xE0
+
+
+#define IRCC_IRHALFDUPLEX_TIMEOUT 0x01
+
+#define IRCC_SCE_TX_DELAY_TIMER 0x02
+
+/*
+ * Other definitions
+ */
+
+#define SMSC_IRCC2_MAX_SIR_SPEED 115200
+#define SMSC_IRCC2_FIR_CHIP_IO_EXTENT 8
+#define SMSC_IRCC2_SIR_CHIP_IO_EXTENT 8
+#define SMSC_IRCC2_FIFO_SIZE 16
+#define SMSC_IRCC2_FIFO_THRESHOLD 64
+/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
+#define SMSC_IRCC2_RX_BUFF_TRUESIZE 14384
+#define SMSC_IRCC2_TX_BUFF_TRUESIZE 14384
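+/* e.g. with a 2048-byte data size and the window size of 7 defined below:
+ * (2048 + 6) * 7 + 6 = 14384 */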
+#define SMSC_IRCC2_MIN_TURN_TIME 0x07
+#define SMSC_IRCC2_WINDOW_SIZE 0x07
+/* Maximum wait for hw transmitter to finish */
+#define SMSC_IRCC2_HW_TRANSMITTER_TIMEOUT_US 1000 /* 1 ms */
+/* Maximum wait for ATC transceiver programming to finish */
+#define SMSC_IRCC2_ATC_PROGRAMMING_TIMEOUT_JIFFIES 1
+#endif /* SMSC_IRCC2_H */
diff --git a/drivers/staging/irda/drivers/smsc-sio.h b/drivers/staging/irda/drivers/smsc-sio.h
new file mode 100644
index 000000000000..59e20e653ebe
--- /dev/null
+++ b/drivers/staging/irda/drivers/smsc-sio.h
@@ -0,0 +1,100 @@
+#ifndef SMSC_SIO_H
+#define SMSC_SIO_H
+
+/******************************************
+ Keys. They should work with every SMSC SIO
+ ******************************************/
+
+#define SMSCSIO_CFGACCESSKEY 0x55
+#define SMSCSIO_CFGEXITKEY 0xaa
+
+/*****************************
+ * Generic SIO Flat (!?) *
+ *****************************/
+
+/* Register 0x0d */
+#define SMSCSIOFLAT_DEVICEID_REG 0x0d
+
+/* Register 0x0c */
+#define SMSCSIOFLAT_UARTMODE0C_REG 0x0c
+#define SMSCSIOFLAT_UART2MODE_MASK 0x38
+#define SMSCSIOFLAT_UART2MODE_VAL_COM 0x00
+#define SMSCSIOFLAT_UART2MODE_VAL_IRDA 0x08
+#define SMSCSIOFLAT_UART2MODE_VAL_ASKIR 0x10
+
+/* Register 0x25 */
+#define SMSCSIOFLAT_UART2BASEADDR_REG 0x25
+
+/* Register 0x2b */
+#define SMSCSIOFLAT_FIRBASEADDR_REG 0x2b
+
+/* Register 0x2c */
+#define SMSCSIOFLAT_FIRDMASELECT_REG 0x2c
+#define SMSCSIOFLAT_FIRDMASELECT_MASK 0x0f
+
+/* Register 0x28 */
+#define SMSCSIOFLAT_UARTIRQSELECT_REG 0x28
+#define SMSCSIOFLAT_UART2IRQSELECT_MASK 0x0f
+#define SMSCSIOFLAT_UART1IRQSELECT_MASK 0xf0
+#define SMSCSIOFLAT_UARTIRQSELECT_VAL_NONE 0x00
+
+
+/*********************
+ * LPC47N227 *
+ *********************/
+
+#define LPC47N227_CFGACCESSKEY 0x55
+#define LPC47N227_CFGEXITKEY 0xaa
+
+/* Register 0x00 */
+#define LPC47N227_FDCPOWERVALIDCONF_REG 0x00
+#define LPC47N227_FDCPOWER_MASK 0x08
+#define LPC47N227_VALID_MASK 0x80
+
+/* Register 0x02 */
+#define LPC47N227_UART12POWER_REG 0x02
+#define LPC47N227_UART1POWERDOWN_MASK 0x08
+#define LPC47N227_UART2POWERDOWN_MASK 0x80
+
+/* Register 0x07 */
+#define LPC47N227_APMBOOTDRIVE_REG 0x07
+#define LPC47N227_PARPORT2AUTOPWRDOWN_MASK	0x10 /* auto power-down enabled if set */
+#define LPC47N227_UART2AUTOPWRDOWN_MASK		0x20 /* auto power-down enabled if set */
+#define LPC47N227_UART1AUTOPWRDOWN_MASK		0x40 /* auto power-down enabled if set */
+
+/* Register 0x0c */
+#define LPC47N227_UARTMODE0C_REG 0x0c
+#define LPC47N227_UART2MODE_MASK 0x38
+#define LPC47N227_UART2MODE_VAL_COM 0x00
+#define LPC47N227_UART2MODE_VAL_IRDA 0x08
+#define LPC47N227_UART2MODE_VAL_ASKIR 0x10
+
+/* Register 0x0d */
+#define LPC47N227_DEVICEID_REG 0x0d
+#define LPC47N227_DEVICEID_DEFVAL 0x5a
+
+/* Register 0x0e */
+#define LPC47N227_REVISIONID_REG 0x0e
+
+/* Register 0x25 */
+#define LPC47N227_UART2BASEADDR_REG 0x25
+
+/* Register 0x28 */
+#define LPC47N227_UARTIRQSELECT_REG 0x28
+#define LPC47N227_UART2IRQSELECT_MASK 0x0f
+#define LPC47N227_UART1IRQSELECT_MASK 0xf0
+#define LPC47N227_UARTIRQSELECT_VAL_NONE 0x00
+
+/* Register 0x2b */
+#define LPC47N227_FIRBASEADDR_REG 0x2b
+
+/* Register 0x2c */
+#define LPC47N227_FIRDMASELECT_REG 0x2c
+#define LPC47N227_FIRDMASELECT_MASK 0x0f
+#define LPC47N227_FIRDMASELECT_VAL_DMA1 0x01 /* 47n227 has three dma channels */
+#define LPC47N227_FIRDMASELECT_VAL_DMA2 0x02
+#define LPC47N227_FIRDMASELECT_VAL_DMA3 0x03
+#define LPC47N227_FIRDMASELECT_VAL_NONE 0x0f
+
+
+#endif
diff --git a/drivers/staging/irda/drivers/stir4200.c b/drivers/staging/irda/drivers/stir4200.c
new file mode 100644
index 000000000000..ee2cb70b688d
--- /dev/null
+++ b/drivers/staging/irda/drivers/stir4200.c
@@ -0,0 +1,1134 @@
+/*****************************************************************************
+*
+* Filename: stir4200.c
+* Version: 0.4
+* Description: Irda SigmaTel USB Dongle
+* Status: Experimental
+* Author: Stephen Hemminger <shemminger@osdl.org>
+*
+* Based on earlier driver by Paul Stewart <stewart@parc.com>
+*
+* Copyright (C) 2000, Roman Weissgaerber <weissg@vienna.at>
+* Copyright (C) 2001, Dag Brattli <dag@brattli.net>
+* Copyright (C) 2001, Jean Tourrilhes <jt@hpl.hp.com>
+* Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+*****************************************************************************/
+
+/*
+ * This dongle does no framing, and requires polling to receive the
+ * data. The STIr4200 has bulk in and out endpoints just like
+ * usb-irda devices, but the data it sends and receives is raw; like
+ * irtty, it needs to call the wrap and unwrap functions to add and
+ * remove SOF/BOF and escape characters to/from the frame.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/crc.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
+MODULE_DESCRIPTION("IrDA-USB Dongle Driver for SigmaTel STIr4200");
+MODULE_LICENSE("GPL");
+
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
+
+static int rx_sensitivity = 1; /* FIR 0..4, SIR 0..6 */
+module_param(rx_sensitivity, int, 0);
+MODULE_PARM_DESC(rx_sensitivity, "Set Receiver sensitivity (0-6, 0 is most sensitive)");
+
+static int tx_power = 0; /* 0 = highest ... 3 = lowest */
+module_param(tx_power, int, 0);
+MODULE_PARM_DESC(tx_power, "Set Transmitter power (0-3, 0 is highest power)");
+
+#define STIR_IRDA_HEADER 4
+#define CTRL_TIMEOUT 100 /* milliseconds */
+#define TRANSMIT_TIMEOUT 200 /* milliseconds */
+#define STIR_FIFO_SIZE 4096
+#define FIFO_REGS_SIZE 3
+
+enum FirChars {
+ FIR_CE = 0x7d,
+ FIR_XBOF = 0x7f,
+ FIR_EOF = 0x7e,
+};
+
+enum StirRequests {
+ REQ_WRITE_REG = 0x00,
+ REQ_READ_REG = 0x01,
+ REQ_READ_ROM = 0x02,
+ REQ_WRITE_SINGLE = 0x03,
+};
+
+/* Register offsets */
+enum StirRegs {
+ REG_RSVD=0,
+ REG_MODE,
+ REG_PDCLK,
+ REG_CTRL1,
+ REG_CTRL2,
+ REG_FIFOCTL,
+ REG_FIFOLSB,
+ REG_FIFOMSB,
+ REG_DPLL,
+ REG_IRDIG,
+ REG_TEST=15,
+};
+
+enum StirModeMask {
+ MODE_FIR = 0x80,
+ MODE_SIR = 0x20,
+ MODE_ASK = 0x10,
+ MODE_FASTRX = 0x08,
+ MODE_FFRSTEN = 0x04,
+ MODE_NRESET = 0x02,
+ MODE_2400 = 0x01,
+};
+
+enum StirPdclkMask {
+ PDCLK_4000000 = 0x02,
+ PDCLK_115200 = 0x09,
+ PDCLK_57600 = 0x13,
+ PDCLK_38400 = 0x1D,
+ PDCLK_19200 = 0x3B,
+ PDCLK_9600 = 0x77,
+ PDCLK_2400 = 0xDF,
+};
+
+enum StirCtrl1Mask {
+ CTRL1_SDMODE = 0x80,
+ CTRL1_RXSLOW = 0x40,
+ CTRL1_TXPWD = 0x10,
+ CTRL1_RXPWD = 0x08,
+ CTRL1_SRESET = 0x01,
+};
+
+enum StirCtrl2Mask {
+ CTRL2_SPWIDTH = 0x08,
+ CTRL2_REVID = 0x03,
+};
+
+enum StirFifoCtlMask {
+ FIFOCTL_DIR = 0x10,
+ FIFOCTL_CLR = 0x08,
+ FIFOCTL_EMPTY = 0x04,
+};
+
+enum StirDiagMask {
+ IRDIG_RXHIGH = 0x80,
+ IRDIG_RXLOW = 0x40,
+};
+
+enum StirTestMask {
+ TEST_PLLDOWN = 0x80,
+ TEST_LOOPIR = 0x40,
+ TEST_LOOPUSB = 0x20,
+ TEST_TSTENA = 0x10,
+ TEST_TSTOSC = 0x0F,
+};
+
+struct stir_cb {
+ struct usb_device *usbdev; /* init: probe_irda */
+ struct net_device *netdev; /* network layer */
+ struct irlap_cb *irlap; /* The link layer we are bound to */
+
+ struct qos_info qos;
+ unsigned speed; /* Current speed */
+
+ struct task_struct *thread; /* transmit thread */
+
+ struct sk_buff *tx_pending;
+ void *io_buf; /* transmit/receive buffer */
+ __u8 *fifo_status;
+
+ iobuff_t rx_buff; /* receive unwrap state machine */
+ ktime_t rx_time;
+ int receiving;
+ struct urb *rx_urb;
+};
+
+
+/* These are the currently known USB ids */
+static const struct usb_device_id dongles[] = {
+ /* SigmaTel, Inc, STIr4200 IrDA/USB Bridge */
+ { USB_DEVICE(0x066f, 0x4200) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, dongles);
+
+/* Send control message to set dongle register */
+static int write_reg(struct stir_cb *stir, __u16 reg, __u8 value)
+{
+ struct usb_device *dev = stir->usbdev;
+
+ pr_debug("%s: write reg %d = 0x%x\n",
+ stir->netdev->name, reg, value);
+ return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ REQ_WRITE_SINGLE,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_DEVICE,
+ value, reg, NULL, 0,
+ CTRL_TIMEOUT);
+}
+
+/* Send control message to read multiple registers */
+static inline int read_reg(struct stir_cb *stir, __u16 reg,
+ __u8 *data, __u16 count)
+{
+ struct usb_device *dev = stir->usbdev;
+
+ return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ REQ_READ_REG,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, reg, data, count,
+ CTRL_TIMEOUT);
+}
+
+static inline int isfir(u32 speed)
+{
+ return speed == 4000000;
+}
+
+/*
+ * Prepare a FIR IrDA frame for transmission to the USB dongle. The
+ * FIR transmit frame is documented in the datasheet. It consists of
+ * a two byte 0x55 0xAA sequence, two little-endian length bytes, a
+ * sequence of exactly 16 XBOF bytes of 0x7F, two BOF bytes of 0x7E,
+ * then the data escaped as follows:
+ *
+ * 0x7D -> 0x7D 0x5D
+ * 0x7E -> 0x7D 0x5E
+ * 0x7F -> 0x7D 0x5F
+ *
+ * Then, 4 bytes of little endian (stuffed) FCS follow, then two
+ * trailing EOF bytes of 0x7E.
+ */
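A minimal standalone sketch of the escaping rule above (an illustration only, equivalent to the stuff_fir() helper that follows): only the three marker values 0x7d-0x7f are escaped, each sent as 0x7d followed by the byte XORed with 0x20, which is exactly the 0x7D -> 0x7D 0x5D mapping listed.

static unsigned char *stuff_byte_sketch(unsigned char *p, unsigned char c)
{
	if (c >= 0x7d && c <= 0x7f) {	/* FIR_CE, FIR_EOF, FIR_XBOF */
		*p++ = 0x7d;		/* escape marker */
		c ^= 0x20;		/* 0x7e becomes 0x5e, etc. */
	}
	*p++ = c;			/* everything else passes through */
	return p;
}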
+static inline __u8 *stuff_fir(__u8 *p, __u8 c)
+{
+ switch(c) {
+ case 0x7d:
+ case 0x7e:
+ case 0x7f:
+ *p++ = 0x7d;
+ c ^= IRDA_TRANS;
+ /* fall through */
+ default:
+ *p++ = c;
+ }
+ return p;
+}
+
+/* Take raw data in skb and put it wrapped into buf */
+static unsigned wrap_fir_skb(const struct sk_buff *skb, __u8 *buf)
+{
+ __u8 *ptr = buf;
+ __u32 fcs = ~(crc32_le(~0, skb->data, skb->len));
+ __u16 wraplen;
+ int i;
+
+ /* Header */
+ buf[0] = 0x55;
+ buf[1] = 0xAA;
+
+ ptr = buf + STIR_IRDA_HEADER;
+ memset(ptr, 0x7f, 16);
+ ptr += 16;
+
+ /* BOF */
+ *ptr++ = 0x7e;
+ *ptr++ = 0x7e;
+
+ /* Address / Control / Information */
+ for (i = 0; i < skb->len; i++)
+ ptr = stuff_fir(ptr, skb->data[i]);
+
+ /* FCS */
+ ptr = stuff_fir(ptr, fcs & 0xff);
+ ptr = stuff_fir(ptr, (fcs >> 8) & 0xff);
+ ptr = stuff_fir(ptr, (fcs >> 16) & 0xff);
+ ptr = stuff_fir(ptr, (fcs >> 24) & 0xff);
+
+ /* EOFs */
+ *ptr++ = 0x7e;
+ *ptr++ = 0x7e;
+
+ /* Total length, minus the header */
+ wraplen = (ptr - buf) - STIR_IRDA_HEADER;
+ buf[2] = wraplen & 0xff;
+ buf[3] = (wraplen >> 8) & 0xff;
+
+ return wraplen + STIR_IRDA_HEADER;
+}
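For a frame whose payload and FCS contain no bytes that need escaping, the buffer built above is 4 (0x55 0xAA plus two length bytes) + 16 XBOFs + 2 BOFs + skb->len data bytes + 4 FCS bytes + 2 EOFs = skb->len + 28 bytes, and buf[2]/buf[3] carry skb->len + 24 as the little-endian wrapped length; each byte that does need escaping adds one more byte.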
+
+static unsigned wrap_sir_skb(struct sk_buff *skb, __u8 *buf)
+{
+ __u16 wraplen;
+
+ wraplen = async_wrap_skb(skb, buf + STIR_IRDA_HEADER,
+ STIR_FIFO_SIZE - STIR_IRDA_HEADER);
+ buf[0] = 0x55;
+ buf[1] = 0xAA;
+ buf[2] = wraplen & 0xff;
+ buf[3] = (wraplen >> 8) & 0xff;
+
+ return wraplen + STIR_IRDA_HEADER;
+}
+
+/*
+ * Frame is fully formed in the rx_buff so check crc
+ * and pass up to irlap
+ * setup for next receive
+ */
+static void fir_eof(struct stir_cb *stir)
+{
+ iobuff_t *rx_buff = &stir->rx_buff;
+ int len = rx_buff->len - 4;
+ struct sk_buff *skb, *nskb;
+ __u32 fcs;
+
+ if (unlikely(len <= 0)) {
+ pr_debug("%s: short frame len %d\n",
+ stir->netdev->name, len);
+
+ ++stir->netdev->stats.rx_errors;
+ ++stir->netdev->stats.rx_length_errors;
+ return;
+ }
+
+ fcs = ~(crc32_le(~0, rx_buff->data, len));
+ if (fcs != get_unaligned_le32(rx_buff->data + len)) {
+ pr_debug("crc error calc 0x%x len %d\n", fcs, len);
+ stir->netdev->stats.rx_errors++;
+ stir->netdev->stats.rx_crc_errors++;
+ return;
+ }
+
+ /* if frame is short then just copy it */
+ if (len < IRDA_RX_COPY_THRESHOLD) {
+ nskb = dev_alloc_skb(len + 1);
+ if (unlikely(!nskb)) {
+ ++stir->netdev->stats.rx_dropped;
+ return;
+ }
+ skb_reserve(nskb, 1);
+ skb = nskb;
+ skb_copy_to_linear_data(nskb, rx_buff->data, len);
+ } else {
+ nskb = dev_alloc_skb(rx_buff->truesize);
+ if (unlikely(!nskb)) {
+ ++stir->netdev->stats.rx_dropped;
+ return;
+ }
+ skb_reserve(nskb, 1);
+ skb = rx_buff->skb;
+ rx_buff->skb = nskb;
+ rx_buff->head = nskb->data;
+ }
+
+ skb_put(skb, len);
+
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ skb->dev = stir->netdev;
+
+ netif_rx(skb);
+
+ stir->netdev->stats.rx_packets++;
+ stir->netdev->stats.rx_bytes += len;
+
+ rx_buff->data = rx_buff->head;
+ rx_buff->len = 0;
+}
+
+/* Unwrap FIR stuffed data and bump it to IrLAP */
+static void stir_fir_chars(struct stir_cb *stir,
+ const __u8 *bytes, int len)
+{
+ iobuff_t *rx_buff = &stir->rx_buff;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ __u8 byte = bytes[i];
+
+ switch(rx_buff->state) {
+ case OUTSIDE_FRAME:
+ /* ignore garbage till start of frame */
+ if (unlikely(byte != FIR_EOF))
+ continue;
+ /* Now receiving frame */
+ rx_buff->state = BEGIN_FRAME;
+
+ /* Time to initialize receive buffer */
+ rx_buff->data = rx_buff->head;
+ rx_buff->len = 0;
+ continue;
+
+ case LINK_ESCAPE:
+ if (byte == FIR_EOF) {
+ pr_debug("%s: got EOF after escape\n",
+ stir->netdev->name);
+ goto frame_error;
+ }
+ rx_buff->state = INSIDE_FRAME;
+ byte ^= IRDA_TRANS;
+ break;
+
+ case BEGIN_FRAME:
+ /* ignore multiple BOF/EOF */
+ if (byte == FIR_EOF)
+ continue;
+ rx_buff->state = INSIDE_FRAME;
+ rx_buff->in_frame = TRUE;
+
+ /* fall through */
+ case INSIDE_FRAME:
+ switch(byte) {
+ case FIR_CE:
+ rx_buff->state = LINK_ESCAPE;
+ continue;
+ case FIR_XBOF:
+ /* 0x7f is not used in this framing */
+ pr_debug("%s: got XBOF without escape\n",
+ stir->netdev->name);
+ goto frame_error;
+ case FIR_EOF:
+ rx_buff->state = OUTSIDE_FRAME;
+ rx_buff->in_frame = FALSE;
+ fir_eof(stir);
+ continue;
+ }
+ break;
+ }
+
+ /* add byte to rx buffer */
+ if (unlikely(rx_buff->len >= rx_buff->truesize)) {
+ pr_debug("%s: fir frame exceeds %d\n",
+ stir->netdev->name, rx_buff->truesize);
+ ++stir->netdev->stats.rx_over_errors;
+ goto error_recovery;
+ }
+
+ rx_buff->data[rx_buff->len++] = byte;
+ continue;
+
+ frame_error:
+ ++stir->netdev->stats.rx_frame_errors;
+
+ error_recovery:
+ ++stir->netdev->stats.rx_errors;
+ rx_buff->state = OUTSIDE_FRAME;
+ rx_buff->in_frame = FALSE;
+ }
+}
+
+/* Unwrap SIR stuffed data and bump it up to IrLAP */
+static void stir_sir_chars(struct stir_cb *stir,
+ const __u8 *bytes, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ async_unwrap_char(stir->netdev, &stir->netdev->stats,
+ &stir->rx_buff, bytes[i]);
+}
+
+static inline void unwrap_chars(struct stir_cb *stir,
+ const __u8 *bytes, int length)
+{
+ if (isfir(stir->speed))
+ stir_fir_chars(stir, bytes, length);
+ else
+ stir_sir_chars(stir, bytes, length);
+}
+
+/* Mode parameters for each speed */
+static const struct {
+ unsigned speed;
+ __u8 pdclk;
+} stir_modes[] = {
+ { 2400, PDCLK_2400 },
+ { 9600, PDCLK_9600 },
+ { 19200, PDCLK_19200 },
+ { 38400, PDCLK_38400 },
+ { 57600, PDCLK_57600 },
+ { 115200, PDCLK_115200 },
+ { 4000000, PDCLK_4000000 },
+};
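As a cross-check on the table (an observation about the values, not something stated in the driver): the 9600-115200 entries are consistent with a divisor of the form 1152000 / baud - 1, e.g. 1152000 / 9600 - 1 = 119 = 0x77 and 1152000 / 115200 - 1 = 9 = 0x09; the 2400 and 4 Mbit/s entries do not follow that simple pattern and are paired with the MODE_2400 and MODE_FIR bits in change_speed() below.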
+
+
+/*
+ * Setup chip for speed.
+ * Called at startup to initialize the chip
+ * and on speed changes.
+ *
+ * Note: Write multiple registers doesn't appear to work
+ */
+static int change_speed(struct stir_cb *stir, unsigned speed)
+{
+ int i, err;
+ __u8 mode;
+
+ for (i = 0; i < ARRAY_SIZE(stir_modes); ++i) {
+ if (speed == stir_modes[i].speed)
+ goto found;
+ }
+
+ dev_warn(&stir->netdev->dev, "invalid speed %d\n", speed);
+ return -EINVAL;
+
+ found:
+ pr_debug("speed change from %d to %d\n", stir->speed, speed);
+
+ /* Reset modulator */
+ err = write_reg(stir, REG_CTRL1, CTRL1_SRESET);
+ if (err)
+ goto out;
+
+ /* Undocumented magic to tweak the DPLL */
+ err = write_reg(stir, REG_DPLL, 0x15);
+ if (err)
+ goto out;
+
+ /* Set clock */
+ err = write_reg(stir, REG_PDCLK, stir_modes[i].pdclk);
+ if (err)
+ goto out;
+
+ mode = MODE_NRESET | MODE_FASTRX;
+ if (isfir(speed))
+ mode |= MODE_FIR | MODE_FFRSTEN;
+ else
+ mode |= MODE_SIR;
+
+ if (speed == 2400)
+ mode |= MODE_2400;
+
+ err = write_reg(stir, REG_MODE, mode);
+ if (err)
+ goto out;
+
+ /* This resets TEMIC style transceiver if any. */
+ err = write_reg(stir, REG_CTRL1,
+ CTRL1_SDMODE | (tx_power & 3) << 1);
+ if (err)
+ goto out;
+
+ err = write_reg(stir, REG_CTRL1, (tx_power & 3) << 1);
+ if (err)
+ goto out;
+
+ /* Reset sensitivity */
+ err = write_reg(stir, REG_CTRL2, (rx_sensitivity & 7) << 5);
+ out:
+ stir->speed = speed;
+ return err;
+}
+
+/*
+ * Called from net/core when new frame is available.
+ */
+static netdev_tx_t stir_hard_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct stir_cb *stir = netdev_priv(netdev);
+
+ netif_stop_queue(netdev);
+
+ /* the IRDA wrapping routines don't deal with non linear skb */
+ SKB_LINEAR_ASSERT(skb);
+
+ skb = xchg(&stir->tx_pending, skb);
+ wake_up_process(stir->thread);
+
+ /* this should never happen unless there is a stop/wakeup problem */
+ if (unlikely(skb)) {
+ WARN_ON(1);
+ dev_kfree_skb(skb);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Wait for the transmit FIFO to have space for next data
+ *
+ * If space < 0 then wait till FIFO completely drains.
+ * FYI: can take up to 13 seconds at 2400 baud.
+ */
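The 13-second figure follows from the sleep estimate used in the loop below: a full 4096-byte FIFO (STIR_FIFO_SIZE) drained at 2400 baud gives msleep(4096 * 8000 / 2400) = msleep(13653), i.e. roughly 13.7 seconds (count bytes times 8 bits, scaled to milliseconds).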
+static int fifo_txwait(struct stir_cb *stir, int space)
+{
+ int err;
+ unsigned long count, status;
+ unsigned long prev_count = 0x1fff;
+
+ /* Read FIFO status and count */
+ for (;; prev_count = count) {
+ err = read_reg(stir, REG_FIFOCTL, stir->fifo_status,
+ FIFO_REGS_SIZE);
+ if (unlikely(err != FIFO_REGS_SIZE)) {
+ dev_warn(&stir->netdev->dev,
+ "FIFO register read error: %d\n", err);
+
+ return err;
+ }
+
+ status = stir->fifo_status[0];
+ count = (unsigned)(stir->fifo_status[2] & 0x1f) << 8
+ | stir->fifo_status[1];
+
+ pr_debug("fifo status 0x%lx count %lu\n", status, count);
+
+ /* is fifo receiving already, or empty */
+ if (!(status & FIFOCTL_DIR) ||
+ (status & FIFOCTL_EMPTY))
+ return 0;
+
+ if (signal_pending(current))
+ return -EINTR;
+
+ /* shutting down? */
+ if (!netif_running(stir->netdev) ||
+ !netif_device_present(stir->netdev))
+ return -ESHUTDOWN;
+
+ /* only waiting for some space */
+ if (space >= 0 && STIR_FIFO_SIZE - 4 > space + count)
+ return 0;
+
+ /* queue confused */
+ if (prev_count < count)
+ break;
+
+ /* estimate transfer time for remaining chars */
+ msleep((count * 8000) / stir->speed);
+ }
+
+ err = write_reg(stir, REG_FIFOCTL, FIFOCTL_CLR);
+ if (err)
+ return err;
+ err = write_reg(stir, REG_FIFOCTL, 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+
+/* Wait for turnaround delay before starting transmit. */
+static void turnaround_delay(const struct stir_cb *stir, long us)
+{
+ long ticks;
+
+ if (us <= 0)
+ return;
+
+ us -= ktime_us_delta(ktime_get(), stir->rx_time);
+
+ if (us < 10)
+ return;
+
+ ticks = us / (1000000 / HZ);
+ if (ticks > 0)
+ schedule_timeout_interruptible(1 + ticks);
+ else
+ udelay(us);
+}
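As a worked example (assuming HZ=250, a common configuration): with a 10 ms minimum turn time (us = 10000) and 4 ms already elapsed since rx_time, 6000 us remain; one tick is 1000000 / HZ = 4000 us, so ticks = 1 and the thread sleeps for 1 + ticks = 2 jiffies instead of busy-waiting in udelay().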
+
+/*
+ * Start receiver by submitting a request to the receive pipe.
+ * If nothing is available it will return after rx_interval.
+ */
+static int receive_start(struct stir_cb *stir)
+{
+ /* reset state */
+ stir->receiving = 1;
+
+ stir->rx_buff.in_frame = FALSE;
+ stir->rx_buff.state = OUTSIDE_FRAME;
+
+ stir->rx_urb->status = 0;
+ return usb_submit_urb(stir->rx_urb, GFP_KERNEL);
+}
+
+/* Stop all pending receive Urb's */
+static void receive_stop(struct stir_cb *stir)
+{
+ stir->receiving = 0;
+ usb_kill_urb(stir->rx_urb);
+
+ if (stir->rx_buff.in_frame)
+ stir->netdev->stats.collisions++;
+}
+/*
+ * Wrap data in socket buffer and send it.
+ */
+static void stir_send(struct stir_cb *stir, struct sk_buff *skb)
+{
+ unsigned wraplen;
+ int first_frame = 0;
+
+ /* if receiving, need to turnaround */
+ if (stir->receiving) {
+ receive_stop(stir);
+ turnaround_delay(stir, irda_get_mtt(skb));
+ first_frame = 1;
+ }
+
+ if (isfir(stir->speed))
+ wraplen = wrap_fir_skb(skb, stir->io_buf);
+ else
+ wraplen = wrap_sir_skb(skb, stir->io_buf);
+
+ /* check for space available in fifo */
+ if (!first_frame)
+ fifo_txwait(stir, wraplen);
+
+ stir->netdev->stats.tx_packets++;
+ stir->netdev->stats.tx_bytes += skb->len;
+ netif_trans_update(stir->netdev);
+ pr_debug("send %d (%d)\n", skb->len, wraplen);
+
+ if (usb_bulk_msg(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1),
+ stir->io_buf, wraplen,
+ NULL, TRANSMIT_TIMEOUT))
+ stir->netdev->stats.tx_errors++;
+}
+
+/*
+ * Transmit state machine thread
+ */
+static int stir_transmit_thread(void *arg)
+{
+ struct stir_cb *stir = arg;
+ struct net_device *dev = stir->netdev;
+ struct sk_buff *skb;
+
+ while (!kthread_should_stop()) {
+#ifdef CONFIG_PM
+ /* if suspending, then power off and wait */
+ if (unlikely(freezing(current))) {
+ if (stir->receiving)
+ receive_stop(stir);
+ else
+ fifo_txwait(stir, -1);
+
+ write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
+
+ try_to_freeze();
+
+ if (change_speed(stir, stir->speed))
+ break;
+ }
+#endif
+
+ /* anything to send? */
+ skb = xchg(&stir->tx_pending, NULL);
+ if (skb) {
+ unsigned new_speed = irda_get_next_speed(skb);
+ netif_wake_queue(dev);
+
+ if (skb->len > 0)
+ stir_send(stir, skb);
+ dev_kfree_skb(skb);
+
+ if ((new_speed != -1) && (stir->speed != new_speed)) {
+ if (fifo_txwait(stir, -1) ||
+ change_speed(stir, new_speed))
+ break;
+ }
+ continue;
+ }
+
+ /* nothing to send? start receiving */
+ if (!stir->receiving &&
+ irda_device_txqueue_empty(dev)) {
+ /* Wait otherwise chip gets confused. */
+ if (fifo_txwait(stir, -1))
+ break;
+
+ if (unlikely(receive_start(stir))) {
+ if (net_ratelimit())
+ dev_info(&dev->dev,
+ "%s: receive usb submit failed\n",
+ stir->netdev->name);
+ stir->receiving = 0;
+ msleep(10);
+ continue;
+ }
+ }
+
+ /* sleep if nothing to send */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+
+ }
+ return 0;
+}
+
+
+/*
+ * USB bulk receive completion callback.
+ * Wakes up every ms (usb round trip) with wrapped
+ * data.
+ */
+static void stir_rcv_irq(struct urb *urb)
+{
+ struct stir_cb *stir = urb->context;
+ int err;
+
+ /* in process of stopping, just drop data */
+ if (!netif_running(stir->netdev))
+ return;
+
+ /* unlink, shutdown, unplug, other nasties */
+ if (urb->status != 0)
+ return;
+
+ if (urb->actual_length > 0) {
+ pr_debug("receive %d\n", urb->actual_length);
+ unwrap_chars(stir, urb->transfer_buffer,
+ urb->actual_length);
+
+ stir->rx_time = ktime_get();
+ }
+
+ /* kernel thread is stopping the receiver, don't resubmit */
+ if (!stir->receiving)
+ return;
+
+ /* resubmit existing urb */
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+
+ /* in case of error, the kernel thread will restart us */
+ if (err) {
+ dev_warn(&stir->netdev->dev, "usb receive submit error: %d\n",
+ err);
+ stir->receiving = 0;
+ wake_up_process(stir->thread);
+ }
+}
+
+/*
+ * Function stir_net_open (dev)
+ *
+ * Network device is taken up. Usually this is done by "ifconfig irda0 up"
+ */
+static int stir_net_open(struct net_device *netdev)
+{
+ struct stir_cb *stir = netdev_priv(netdev);
+ int err;
+ char hwname[16];
+
+ err = usb_clear_halt(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1));
+ if (err)
+ goto err_out1;
+ err = usb_clear_halt(stir->usbdev, usb_rcvbulkpipe(stir->usbdev, 2));
+ if (err)
+ goto err_out1;
+
+ err = change_speed(stir, 9600);
+ if (err)
+ goto err_out1;
+
+ err = -ENOMEM;
+
+ /* Initialize for SIR/FIR to copy data directly into skb. */
+ stir->receiving = 0;
+ stir->rx_buff.truesize = IRDA_SKB_MAX_MTU;
+ stir->rx_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
+ if (!stir->rx_buff.skb)
+ goto err_out1;
+
+ skb_reserve(stir->rx_buff.skb, 1);
+ stir->rx_buff.head = stir->rx_buff.skb->data;
+ stir->rx_time = ktime_get();
+
+ stir->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!stir->rx_urb)
+ goto err_out2;
+
+ stir->io_buf = kmalloc(STIR_FIFO_SIZE, GFP_KERNEL);
+ if (!stir->io_buf)
+ goto err_out3;
+
+ usb_fill_bulk_urb(stir->rx_urb, stir->usbdev,
+ usb_rcvbulkpipe(stir->usbdev, 2),
+ stir->io_buf, STIR_FIFO_SIZE,
+ stir_rcv_irq, stir);
+
+ stir->fifo_status = kmalloc(FIFO_REGS_SIZE, GFP_KERNEL);
+ if (!stir->fifo_status)
+ goto err_out4;
+
+ /*
+ * Now that everything should be initialized properly,
+ * open a new IrLAP layer instance to take care of us...
+ * Note: it will immediately send a speed change...
+ */
+ sprintf(hwname, "usb#%d", stir->usbdev->devnum);
+ stir->irlap = irlap_open(netdev, &stir->qos, hwname);
+ if (!stir->irlap) {
+ dev_err(&stir->usbdev->dev, "irlap_open failed\n");
+ goto err_out5;
+ }
+
+ /** Start kernel thread for transmit. */
+ stir->thread = kthread_run(stir_transmit_thread, stir,
+ "%s", stir->netdev->name);
+ if (IS_ERR(stir->thread)) {
+ err = PTR_ERR(stir->thread);
+ dev_err(&stir->usbdev->dev, "unable to start kernel thread\n");
+ goto err_out6;
+ }
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+ err_out6:
+ irlap_close(stir->irlap);
+ err_out5:
+ kfree(stir->fifo_status);
+ err_out4:
+ kfree(stir->io_buf);
+ err_out3:
+ usb_free_urb(stir->rx_urb);
+ err_out2:
+ kfree_skb(stir->rx_buff.skb);
+ err_out1:
+ return err;
+}
+
+/*
+ * Function stir_net_close (stir)
+ *
+ * Network device is taken down. Usually this is done by
+ * "ifconfig irda0 down"
+ */
+static int stir_net_close(struct net_device *netdev)
+{
+ struct stir_cb *stir = netdev_priv(netdev);
+
+ /* Stop transmit processing */
+ netif_stop_queue(netdev);
+
+ /* Kill transmit thread */
+ kthread_stop(stir->thread);
+ kfree(stir->fifo_status);
+
+ /* Mop up receive urb's */
+ usb_kill_urb(stir->rx_urb);
+
+ kfree(stir->io_buf);
+ usb_free_urb(stir->rx_urb);
+ kfree_skb(stir->rx_buff.skb);
+
+ /* Stop and remove instance of IrLAP */
+ if (stir->irlap)
+ irlap_close(stir->irlap);
+
+ stir->irlap = NULL;
+
+ return 0;
+}
+
+/*
+ * IOCTLs : Extra out-of-band network commands...
+ */
+static int stir_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct stir_cb *stir = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the device is still there */
+ if (netif_device_present(stir->netdev))
+ ret = change_speed(stir, irq->ifr_baudrate);
+ break;
+
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Check if the IrDA stack is still there */
+ if (netif_running(stir->netdev))
+ irda_device_set_media_busy(stir->netdev, TRUE);
+ break;
+
+ case SIOCGRECEIVING:
+ /* Only approximately true */
+ irq->ifr_receiving = stir->receiving;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static const struct net_device_ops stir_netdev_ops = {
+ .ndo_open = stir_net_open,
+ .ndo_stop = stir_net_close,
+ .ndo_start_xmit = stir_hard_xmit,
+ .ndo_do_ioctl = stir_net_ioctl,
+};
+
+/*
+ * This routine is called by the USB subsystem for each new device
+ * in the system. We need to check if the device is ours, and in
+ * this case start handling it.
+ * Note : it might be worth protecting this function by a global
+ * spinlock... Or not, because maybe USB already deals with that...
+ */
+static int stir_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct stir_cb *stir = NULL;
+ struct net_device *net;
+ int ret = -ENOMEM;
+
+ /* Allocate network device container. */
+ net = alloc_irdadev(sizeof(*stir));
+ if(!net)
+ goto err_out1;
+
+ SET_NETDEV_DEV(net, &intf->dev);
+ stir = netdev_priv(net);
+ stir->netdev = net;
+ stir->usbdev = dev;
+
+ ret = usb_reset_configuration(dev);
+ if (ret != 0) {
+ dev_err(&intf->dev, "usb reset configuration failed\n");
+ goto err_out2;
+ }
+
+ printk(KERN_INFO "SigmaTel STIr4200 IRDA/USB found at address %d, "
+ "Vendor: %x, Product: %x\n",
+ dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&stir->qos);
+
+ /* That's the Rx capability. */
+ stir->qos.baud_rate.bits &= IR_2400 | IR_9600 | IR_19200 |
+ IR_38400 | IR_57600 | IR_115200 |
+ (IR_4000000 << 8);
+ stir->qos.min_turn_time.bits &= qos_mtt_bits;
+ irda_qos_bits_to_value(&stir->qos);
+
+ /* Override the network functions we need to use */
+ net->netdev_ops = &stir_netdev_ops;
+
+ ret = register_netdev(net);
+ if (ret != 0)
+ goto err_out2;
+
+ dev_info(&intf->dev, "IrDA: Registered SigmaTel device %s\n",
+ net->name);
+
+ usb_set_intfdata(intf, stir);
+
+ return 0;
+
+err_out2:
+ free_netdev(net);
+err_out1:
+ return ret;
+}
+
+/*
+ * The current device is removed, the USB layer tells us to shut it down...
+ */
+static void stir_disconnect(struct usb_interface *intf)
+{
+ struct stir_cb *stir = usb_get_intfdata(intf);
+
+ if (!stir)
+ return;
+
+ unregister_netdev(stir->netdev);
+ free_netdev(stir->netdev);
+
+ usb_set_intfdata(intf, NULL);
+}
+
+#ifdef CONFIG_PM
+/* USB suspend, so power off the transmitter/receiver */
+static int stir_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct stir_cb *stir = usb_get_intfdata(intf);
+
+ netif_device_detach(stir->netdev);
+ return 0;
+}
+
+/* Coming out of suspend, so reset hardware */
+static int stir_resume(struct usb_interface *intf)
+{
+ struct stir_cb *stir = usb_get_intfdata(intf);
+
+ netif_device_attach(stir->netdev);
+
+ /* receiver restarted when send thread wakes up */
+ return 0;
+}
+#endif
+
+/*
+ * USB device callbacks
+ */
+static struct usb_driver irda_driver = {
+ .name = "stir4200",
+ .probe = stir_probe,
+ .disconnect = stir_disconnect,
+ .id_table = dongles,
+#ifdef CONFIG_PM
+ .suspend = stir_suspend,
+ .resume = stir_resume,
+#endif
+};
+
+module_usb_driver(irda_driver);
diff --git a/drivers/staging/irda/drivers/tekram-sir.c b/drivers/staging/irda/drivers/tekram-sir.c
new file mode 100644
index 000000000000..9dcf0c103b9d
--- /dev/null
+++ b/drivers/staging/irda/drivers/tekram-sir.c
@@ -0,0 +1,225 @@
+/*********************************************************************
+ *
+ * Filename: tekram.c
+ * Version: 1.3
+ * Description: Implementation of the Tekram IrMate IR-210B dongle
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Wed Oct 21 20:02:35 1998
+ * Modified at: Sun Oct 27 22:02:38 2002
+ * Modified by: Martin Diehl <mad@mdiehl.de>
+ *
+ * Copyright (c) 1998-1999 Dag Brattli,
+ * Copyright (c) 2002 Martin Diehl,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int tekram_delay = 150; /* default is 150 ms */
+module_param(tekram_delay, int, 0);
+MODULE_PARM_DESC(tekram_delay, "tekram dongle write complete delay");
+
+static int tekram_open(struct sir_dev *);
+static int tekram_close(struct sir_dev *);
+static int tekram_change_speed(struct sir_dev *, unsigned);
+static int tekram_reset(struct sir_dev *);
+
+#define TEKRAM_115200 0x00
+#define TEKRAM_57600 0x01
+#define TEKRAM_38400 0x02
+#define TEKRAM_19200 0x03
+#define TEKRAM_9600 0x04
+
+#define TEKRAM_PW 0x10 /* Pulse select bit */
+
+static struct dongle_driver tekram = {
+ .owner = THIS_MODULE,
+ .driver_name = "Tekram IR-210B",
+ .type = IRDA_TEKRAM_DONGLE,
+ .open = tekram_open,
+ .close = tekram_close,
+ .reset = tekram_reset,
+ .set_speed = tekram_change_speed,
+};
+
+static int __init tekram_sir_init(void)
+{
+ if (tekram_delay < 1 || tekram_delay > 500)
+ tekram_delay = 200;
+ pr_debug("%s - using %d ms delay\n",
+ tekram.driver_name, tekram_delay);
+ return irda_register_dongle(&tekram);
+}
+
+static void __exit tekram_sir_cleanup(void)
+{
+ irda_unregister_dongle(&tekram);
+}
+
+static int tekram_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+ qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int tekram_close(struct sir_dev *dev)
+{
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function tekram_change_speed (dev, speed)
+ *
+ * Set the speed for the Tekram IRMate 210 type dongle. Warning, this
+ * function must be called with a process context!
+ *
+ * Algorithm
+ * 1. clear DTR
+ * 2. set RTS, and wait at least 7 us
+ * 3. send Control Byte to the IR-210 through TXD to set new baud rate
+ * wait until the stop bit of Control Byte is sent (for 9600 baud rate,
+ * it takes about 100 msec)
+ *
+ * [oops, why 100 msec? sending 1 byte (10 bits) takes 1.05 msec
+ * - is this perhaps to compensate for delays in the tty layer?]
+ *
+ * 5. clear RTS (return to NORMAL Operation)
+ * 6. wait at least 50 us, new setting (baud rate, etc) takes effect here
+ * after
+ */
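For reference, the 1.05 ms figure in the note is just 10 bits / 9600 baud ~= 1.04 ms; the driver nevertheless waits tekram_delay between writing the control byte and raising RTS again (150 ms by default, reset to 200 ms by tekram_sir_init() if set outside 1-500 ms), which is the TEKRAM_STATE_WAIT_SPEED step of the state machine below.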
+
+#define TEKRAM_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1)
+
+static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ u8 byte;
+ static int ret = 0;
+
+ switch(state) {
+ case SIRDEV_STATE_DONGLE_SPEED:
+
+ switch (speed) {
+ default:
+ speed = 9600;
+ ret = -EINVAL;
+ /* fall thru */
+ case 9600:
+ byte = TEKRAM_PW|TEKRAM_9600;
+ break;
+ case 19200:
+ byte = TEKRAM_PW|TEKRAM_19200;
+ break;
+ case 38400:
+ byte = TEKRAM_PW|TEKRAM_38400;
+ break;
+ case 57600:
+ byte = TEKRAM_PW|TEKRAM_57600;
+ break;
+ case 115200:
+ byte = TEKRAM_115200;
+ break;
+ }
+
+ /* Set DTR, Clear RTS */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+
+ /* Wait at least 7us */
+ udelay(14);
+
+ /* Write control byte */
+ sirdev_raw_write(dev, &byte, 1);
+
+ dev->speed = speed;
+
+ state = TEKRAM_STATE_WAIT_SPEED;
+ delay = tekram_delay;
+ break;
+
+ case TEKRAM_STATE_WAIT_SPEED:
+ /* Set DTR, Set RTS */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+ udelay(50);
+ break;
+
+ default:
+ net_err_ratelimited("%s - undefined state %d\n",
+ __func__, state);
+ ret = -EINVAL;
+ break;
+ }
+
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+/*
+ * Function tekram_reset (driver)
+ *
+ * This function resets the tekram dongle. Warning, this function
+ * must be called with a process context!!
+ *
+ * Algorithm:
+ * 0. Clear RTS and DTR, and wait 50 ms (power off the IR-210 )
+ * 1. clear RTS
+ * 2. set DTR, and wait at least 1 ms
+ * 3. clear DTR to SPACE state, wait at least 50 us for further
+ * operation
+ */
+
+static int tekram_reset(struct sir_dev *dev)
+{
+ /* Clear DTR, Set RTS */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE);
+
+ /* Should sleep 1 ms */
+ msleep(1);
+
+ /* Set DTR, Set RTS */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Wait at least 50 us */
+ udelay(75);
+
+ dev->speed = 9600;
+
+ return 0;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Tekram IrMate IR-210B dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-0"); /* IRDA_TEKRAM_DONGLE */
+
+module_init(tekram_sir_init);
+module_exit(tekram_sir_cleanup);
diff --git a/drivers/staging/irda/drivers/toim3232-sir.c b/drivers/staging/irda/drivers/toim3232-sir.c
new file mode 100644
index 000000000000..b977d6d33e74
--- /dev/null
+++ b/drivers/staging/irda/drivers/toim3232-sir.c
@@ -0,0 +1,358 @@
+/*********************************************************************
+ *
+ * Filename: toim3232-sir.c
+ * Version: 1.0
+ * Description: Implementation of dongles based on the Vishay/Temic
+ * TOIM3232 SIR Endec chipset. Currently only the
+ * IRWave IR320ST-2 is tested, although it should work
+ * with any TOIM3232 or TOIM4232 chipset based RS232
+ * dongle with minimal modification.
+ * Based heavily on the Tekram driver (tekram.c),
+ * with thanks to Dag Brattli and Martin Diehl.
+ * Status: Experimental.
+ * Author: David Basden <davidb-irda@rcpt.to>
+ * Created at: Thu Feb 09 23:47:32 2006
+ *
+ * Copyright (c) 2006 David Basden.
+ * Copyright (c) 1998-1999 Dag Brattli,
+ * Copyright (c) 2002 Martin Diehl,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+/*
+ * This driver has currently only been tested on the IRWave IR320ST-2
+ *
+ * PROTOCOL:
+ *
+ * The protocol for talking to the TOIM3232 is quite easy, and is
+ * designed to interface with RS232 with only level convertors. The
+ * BR/~D line on the chip is brought high to signal 'command mode',
+ * where a command byte is sent to select the baudrate of the RS232
+ * interface and the pulse length of the IRDA output. When BR/~D
+ * is brought low, the dongle then changes to the selected baudrate,
+ * and the RS232 interface is used for data until BR/~D is brought
+ * high again. The initial speed for the TOIMx323 after RESET is
+ * 9600 baud. The baudrate for command-mode is the last selected
+ * baud-rate, or 9600 after a RESET.
+ *
+ * The dongle I have (below) adds some extra hardware on the front end,
+ * but this is mostly directed towards parasitic power from the RS232
+ * line rather than changing very much about how to communicate with
+ * the TOIM3232.
+ *
+ * The protocol to talk to the TOIM4232 chipset seems to be almost
+ * identical to the TOIM3232 (and the 4232 datasheet is more detailed)
+ * so this code will probably work on that as well, although I haven't
+ * tested it on that hardware.
+ *
+ * Target dongle variations that might be common:
+ *
+ * DTR and RTS function:
+ * The data sheet for the 4232 has a sample implementation that hooks the
+ * DTR and RTS lines to the RESET and BaudRate/~Data lines of the
+ * chip (through line-converters). Given both DTR and RTS would have to
+ * be held low in normal operation, and the TOIMx232 requires +5V to
+ * signal ground, most dongle designers would almost certainly choose
+ * an implementation that kept at least one of DTR or RTS high in
+ * normal operation to provide power to the dongle, but will likely
+ * vary between designs.
+ *
+ * User specified command bits:
+ * There are two user-controllable output lines from the TOIMx232 that
+ * can be set low or high by setting the appropriate bits in the
+ * high-nibble of the command byte (when setting speed and pulse length).
+ * These might be used to switch on and off added hardware or extra
+ * dongle features.
+ *
+ *
+ * Target hardware: IRWave IR320ST-2
+ *
+ * The IRWave IR320ST-2 is a simple dongle based on the Vishay/Temic
+ * TOIM3232 SIR Endec and the Vishay/Temic TFDS4500 SIR IRDA transceiver.
+ * It uses a hex inverter and some discrete components to buffer and
+ * line convert the RS232 down to 5V.
+ *
+ * The dongle is powered through a voltage regulator, fed by a large
+ * capacitor. To switch the dongle on, DTR is brought high to charge
+ * the capacitor and drive the voltage regulator. DTR isn't associated
+ * with any control lines on the TOIM3232. Parasitic power is also taken
+ * from the RTS, TD and RD lines when brought high, but through resistors.
+ * When DTR is low, the circuit might lose power even with RTS high.
+ *
+ * RTS is inverted and attached to the BR/~D input pin. When RTS
+ * is high, BR/~D is low, and the TOIM3232 is in the normal 'data' mode.
+ * RTS is brought low, BR/~D is high, and the TOIM3232 is in 'command
+ * mode'.
+ *
+ * For some unknown reason, the RESET line isn't actually connected
+ * to anything. This means to reset the dongle to get it to a known
+ * state (9600 baud) you must drop DTR and RTS low, wait for the power
+ * capacitor to discharge, and then bring DTR (and RTS for data mode)
+ * high again, and wait for the capacitor to charge, the power supply
+ * to stabilise, and the oscillator clock to stabilise.
+ *
+ * Fortunately, if the current baudrate is known, the chipset can
+ * easily change speed by entering command mode without having to
+ * reset the dongle first.
+ *
+ * Major Components:
+ *
+ * - Vishay/Temic TOIM3232 SIR Endec to change RS232 pulse timings
+ * to IRDA pulse timings
+ * - 3.6864MHz crystal to drive TOIM3232 clock oscillator
+ * - DM74lS04M Inverting Hex line buffer for RS232 input buffering
+ * and level conversion
+ * - PJ2951AC 150mA voltage regulator
+ * - Vishay/Temic TFDS4500 SIR IRDA front-end transceiver
+ *
+ */
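A condensed sketch of the command byte written in command mode (BR/~D high), using the #defines introduced below; it mirrors what toim3232_change_speed() does for each supported rate and is only an illustration, so the two user-controllable bits mentioned above are left at zero.

static unsigned char toim3232_speed_byte(unsigned speed)
{
	switch (speed) {
	case 2400:   return 0x10 | 0x0A;  /* TOIM3232_PW | TOIM3232_2400 */
	case 19200:  return 0x10 | 0x03;  /* TOIM3232_PW | TOIM3232_19200 */
	case 38400:  return 0x10 | 0x02;  /* TOIM3232_PW | TOIM3232_38400 */
	case 57600:  return 0x10 | 0x01;  /* TOIM3232_PW | TOIM3232_57600 */
	case 115200: return 0x00;         /* TOIM3232_115200, no pulse bit */
	case 9600:
	default:     return 0x10 | 0x06;  /* TOIM3232_PW | TOIM3232_9600 */
	}
}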
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int toim3232delay = 150; /* default is 150 ms */
+module_param(toim3232delay, int, 0);
+MODULE_PARM_DESC(toim3232delay, "toim3232 dongle write complete delay");
+
+static int toim3232_open(struct sir_dev *);
+static int toim3232_close(struct sir_dev *);
+static int toim3232_change_speed(struct sir_dev *, unsigned);
+static int toim3232_reset(struct sir_dev *);
+
+#define TOIM3232_115200 0x00
+#define TOIM3232_57600 0x01
+#define TOIM3232_38400 0x02
+#define TOIM3232_19200 0x03
+#define TOIM3232_9600 0x06
+#define TOIM3232_2400 0x0A
+
+#define TOIM3232_PW 0x10 /* Pulse select bit */
+
+static struct dongle_driver toim3232 = {
+ .owner = THIS_MODULE,
+ .driver_name = "Vishay TOIM3232",
+ .type = IRDA_TOIM3232_DONGLE,
+ .open = toim3232_open,
+ .close = toim3232_close,
+ .reset = toim3232_reset,
+ .set_speed = toim3232_change_speed,
+};
+
+static int __init toim3232_sir_init(void)
+{
+ if (toim3232delay < 1 || toim3232delay > 500)
+ toim3232delay = 200;
+ pr_debug("%s - using %d ms delay\n",
+ toim3232.driver_name, toim3232delay);
+ return irda_register_dongle(&toim3232);
+}
+
+static void __exit toim3232_sir_cleanup(void)
+{
+ irda_unregister_dongle(&toim3232);
+}
+
+static int toim3232_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ /* Pull the lines high to start with.
+ *
+ * For the IR320ST-2, we need to charge the main supply capacitor to
+ * switch the device on. We keep DTR high throughout to do this.
+ * When RTS, TD and RD are high, they will also trickle-charge the
+ * cap. RTS is high for data transmission, and low for baud rate select.
+ * -- DGB
+ */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* The TOIM3232 supports many speeds between 1200bps and 115200bps.
+ * We really only care about those supported by the IRDA spec, but
+ * 38400 seems to be implemented in many places */
+ qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+
+ /* From the tekram driver. Not sure what a reasonable value is -- DGB */
+ qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int toim3232_close(struct sir_dev *dev)
+{
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function toim3232_change_speed (dev, speed)
+ *
+ * Set the speed for the TOIM3232 based dongle. Warning, this
+ * function must be called with a process context!
+ *
+ * Algorithm
+ * 1. keep DTR high but clear RTS to bring into baud programming mode
+ * 2. wait at least 7us to enter programming mode
+ * 3. send control word to set baud rate and timing
+ * 4. wait at least 1us
+ * 5. bring RTS high to enter DATA mode (RS232 is passed through to transceiver)
+ * 6. should take effect immediately (although probably worth waiting)
+ */
+
+#define TOIM3232_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1)
+
+static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ u8 byte;
+ static int ret = 0;
+
+ switch(state) {
+ case SIRDEV_STATE_DONGLE_SPEED:
+
+ /* Figure out what we are going to send as a control byte */
+ switch (speed) {
+ case 2400:
+ byte = TOIM3232_PW|TOIM3232_2400;
+ break;
+ default:
+ speed = 9600;
+ ret = -EINVAL;
+ /* fall thru */
+ case 9600:
+ byte = TOIM3232_PW|TOIM3232_9600;
+ break;
+ case 19200:
+ byte = TOIM3232_PW|TOIM3232_19200;
+ break;
+ case 38400:
+ byte = TOIM3232_PW|TOIM3232_38400;
+ break;
+ case 57600:
+ byte = TOIM3232_PW|TOIM3232_57600;
+ break;
+ case 115200:
+ byte = TOIM3232_115200;
+ break;
+ }
+
+ /* Set DTR, Clear RTS: Go into baud programming mode */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+
+ /* Wait at least 7us */
+ udelay(14);
+
+ /* Write control byte */
+ sirdev_raw_write(dev, &byte, 1);
+
+ dev->speed = speed;
+
+ state = TOIM3232_STATE_WAIT_SPEED;
+ delay = toim3232delay;
+ break;
+
+ case TOIM3232_STATE_WAIT_SPEED:
+ /* Have transmitted control byte; wait for 'at least 1us' */
+ udelay(14);
+
+ /* Set DTR, Set RTS: Go into normal data mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Wait (TODO: check this is needed) */
+ udelay(50);
+ break;
+
+ default:
+ printk(KERN_ERR "%s - undefined state %d\n", __func__, state);
+ ret = -EINVAL;
+ break;
+ }
+
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
+
+/*
+ * Function toim3232_reset (dev)
+ *
+ * This function resets the toim3232 dongle. Warning, this function
+ * must be called with a process context!!
+ *
+ * What we should do is:
+ * 0. Pull RESET high
+ * 1. Wait for at least 7us
+ * 2. Pull RESET low
+ * 3. Wait for at least 7us
+ * 4. Pull BR/~D high
+ * 5. Wait for at least 7us
+ * 6. Send control byte to set baud rate
+ * 7. Wait at least 1us after stop bit
+ * 8. Pull BR/~D low
+ * 9. Should then be in data mode
+ *
+ * Because the IR320ST-2 doesn't have the RESET line connected for some reason,
+ * we'll have to do something else.
+ *
+ * The default speed after a RESET is 9600, so let's try just bringing it up in
+ * data mode after switching it off, waiting for the supply capacitor to
+ * discharge, and then switch it back on. This isn't actually pulling RESET
+ * high, but it seems to have the same effect.
+ *
+ * This behaviour will probably work on dongles that have the RESET line connected,
+ * but if not, add a flag for the IR320ST-2, and implement the above-listed proper
+ * behaviour.
+ *
+ * RTS is inverted and then fed to BR/~D, so to put it in programming mode, we
+ * need to pull RTS low
+ */
+
+static int toim3232_reset(struct sir_dev *dev)
+{
+ /* Switch off both DTR and RTS to switch off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ /* Should sleep a while. This might be evil doing it this way.*/
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(50));
+
+ /* Set DTR, Set RTS (data mode) */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Wait at least 10 ms for power to stabilize again */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(10));
+
+ /* Speed should now be 9600 */
+ dev->speed = 9600;
+
+ return 0;
+}
+
+MODULE_AUTHOR("David Basden <davidb-linux@rcpt.to>");
+MODULE_DESCRIPTION("Vishay/Temic TOIM3232 based dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-12"); /* IRDA_TOIM3232_DONGLE */
+
+module_init(toim3232_sir_init);
+module_exit(toim3232_sir_cleanup);
diff --git a/drivers/staging/irda/drivers/via-ircc.c b/drivers/staging/irda/drivers/via-ircc.c
new file mode 100644
index 000000000000..ca4442a9d631
--- /dev/null
+++ b/drivers/staging/irda/drivers/via-ircc.c
@@ -0,0 +1,1593 @@
+/********************************************************************
+ Filename: via-ircc.c
+ Version: 1.0
+ Description: Driver for the VIA VT8231/VT8233 IrDA chipsets
+ Author: VIA Technologies,inc
+ Date : 08/06/2003
+
+Copyright (c) 1998-2003 VIA Technologies, Inc.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; either version 2, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, see <http://www.gnu.org/licenses/>.
+
+F01 Oct/02/02: Modify code for V0.11(move out back to back transfer)
+F02 Oct/28/02: Add SB device ID for 3147 and 3177.
+ Comment :
+ jul/09/2002 : only implement two kind of dongle currently.
+ Oct/02/2002 : work on VT8231 and VT8233 .
+ Aug/06/2003 : change driver format to pci driver .
+
+2004-02-16: <sda@bdit.de>
+- Removed unneeded 'legacy' pci stuff.
+- Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff.
+- On speed change from core, don't send SIR frame with new speed.
+ Use current speed and change speeds later.
+- Make module-param dongle_id actually work.
+- New dongle_id 17 (0x11): TDFS4500. Single-ended SIR only.
+ Tested with home-grown PCB on EPIA boards.
+- Code cleanup.
+
+ ********************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/rtnetlink.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#include <linux/pm.h>
+
+#include <net/irda/wrapper.h>
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include "via-ircc.h"
+
+#define VIA_MODULE_NAME "via-ircc"
+#define CHIP_IO_EXTENT 0x40
+
+static char *driver_name = VIA_MODULE_NAME;
+
+/* Module parameters */
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+static int dongle_id = 0; /* default: probe */
+
+/* We can't guess the type of connected dongle, user *must* supply it. */
+module_param(dongle_id, int, 0);
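For example, a board wired for the IBM31T1100 / Temic TFDS6000-style transceiver (dongle id 0x09, which is also the fallback returned by via_ircc_read_dongle_id() below) would be loaded with dongle_id=9 on the module command line.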
+
+/* Some prototypes */
+static int via_ircc_open(struct pci_dev *pdev, chipio_t *info,
+ unsigned int id);
+static int via_ircc_dma_receive(struct via_ircc_cb *self);
+static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
+ int iobase);
+static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
+ struct net_device *dev);
+static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
+ struct net_device *dev);
+static void via_hw_init(struct via_ircc_cb *self);
+static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
+static irqreturn_t via_ircc_interrupt(int irq, void *dev_id);
+static int via_ircc_is_receiving(struct via_ircc_cb *self);
+static int via_ircc_read_dongle_id(int iobase);
+
+static int via_ircc_net_open(struct net_device *dev);
+static int via_ircc_net_close(struct net_device *dev);
+static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
+ int cmd);
+static void via_ircc_change_dongle_speed(int iobase, int speed,
+ int dongle_id);
+static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
+static void hwreset(struct via_ircc_cb *self);
+static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
+static int upload_rxdata(struct via_ircc_cb *self, int iobase);
+static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id);
+static void via_remove_one(struct pci_dev *pdev);
+
+/* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
+static void iodelay(int udelay)
+{
+ u8 data;
+ int i;
+
+ for (i = 0; i < udelay; i++) {
+ data = inb(0x80);
+ }
+}
+
+static const struct pci_device_id via_pci_tbl[] = {
+ { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
+ { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
+ { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
+ { PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID,0,0,3 },
+ { PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID,0,0,4 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci,via_pci_tbl);
+
+
+static struct pci_driver via_driver = {
+ .name = VIA_MODULE_NAME,
+ .id_table = via_pci_tbl,
+ .probe = via_init_one,
+ .remove = via_remove_one,
+};
+
+
+/*
+ * Function via_ircc_init ()
+ *
+ * Initialize chip. Just find out chip type and resource.
+ */
+static int __init via_ircc_init(void)
+{
+ int rc;
+
+ rc = pci_register_driver(&via_driver);
+ if (rc < 0) {
+ pr_debug("%s(): error rc = %d, returning -ENODEV...\n",
+ __func__, rc);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
+{
+ int rc;
+ u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1;
+ u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
+ chipio_t info;
+
+ pr_debug("%s(): Device ID=(0X%X)\n", __func__, id->device);
+
+ rc = pci_enable_device (pcidev);
+ if (rc) {
+ pr_debug("%s(): error rc = %d\n", __func__, rc);
+ return -ENODEV;
+ }
+
+ // Determine which South Bridge is present
+ if ( ReadLPCReg(0x20) != 0x3C )
+ Chipset=0x3096;
+ else
+ Chipset=0x3076;
+
+ if (Chipset==0x3076) {
+ pr_debug("%s(): Chipset = 3076\n", __func__);
+
+ WriteLPCReg(7,0x0c );
+ temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
+ if((temp&0x01)==1) { // BIOS close or no FIR
+ WriteLPCReg(0x1d, 0x82 );
+ WriteLPCReg(0x23,0x18);
+ temp=ReadLPCReg(0xF0);
+ if((temp&0x01)==0) {
+ temp=(ReadLPCReg(0x74)&0x03); //DMA
+ FirDRQ0=temp + 4;
+ temp=(ReadLPCReg(0x74)&0x0C) >> 2;
+ FirDRQ1=temp + 4;
+ } else {
+ temp=(ReadLPCReg(0x74)&0x0C) >> 2; //DMA
+ FirDRQ0=temp + 4;
+ FirDRQ1=FirDRQ0;
+ }
+ FirIRQ=(ReadLPCReg(0x70)&0x0f); //IRQ
+ FirIOBase=ReadLPCReg(0x60 ) << 8; //IO Space :high byte
+ FirIOBase=FirIOBase| ReadLPCReg(0x61) ; //low byte
+ FirIOBase=FirIOBase ;
+ info.fir_base=FirIOBase;
+ info.irq=FirIRQ;
+ info.dma=FirDRQ1;
+ info.dma2=FirDRQ0;
+ pci_read_config_byte(pcidev,0x40,&bTmp);
+ pci_write_config_byte(pcidev,0x40,((bTmp | 0x08) & 0xfe));
+ pci_read_config_byte(pcidev,0x42,&bTmp);
+ pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
+ pci_write_config_byte(pcidev,0x5a,0xc0);
+ WriteLPCReg(0x28, 0x70 );
+ rc = via_ircc_open(pcidev, &info, 0x3076);
+ } else
+ rc = -ENODEV; //IR not turned on
+ } else { //Not VT1211
+ pr_debug("%s(): Chipset = 3096\n", __func__);
+
+ pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir
+ if((bTmp&0x01)==1) { // BIOS enable FIR
+ //Enable Double DMA clock
+ pci_read_config_byte(pcidev,0x42,&oldPCI_40);
+ pci_write_config_byte(pcidev,0x42,oldPCI_40 | 0x80);
+ pci_read_config_byte(pcidev,0x40,&oldPCI_40);
+ pci_write_config_byte(pcidev,0x40,oldPCI_40 & 0xf7);
+ pci_read_config_byte(pcidev,0x44,&oldPCI_44);
+ pci_write_config_byte(pcidev,0x44,0x4e);
+ //---------- read configuration from Function0 of south bridge
+ if((bTmp&0x02)==0) {
+ pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
+ FirDRQ0 = (bTmp1 & 0x30) >> 4;
+ pci_read_config_byte(pcidev,0x44,&bTmp1);
+ FirDRQ1 = (bTmp1 & 0xc0) >> 6;
+ } else {
+ pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
+ FirDRQ0 = (bTmp1 & 0x30) >> 4 ;
+ FirDRQ1=0;
+ }
+ pci_read_config_byte(pcidev,0x47,&bTmp1); //IRQ
+ FirIRQ = bTmp1 & 0x0f;
+
+ pci_read_config_byte(pcidev,0x69,&bTmp);
+ FirIOBase = bTmp << 8;//high byte
+ pci_read_config_byte(pcidev,0x68,&bTmp);
+ FirIOBase = (FirIOBase | bTmp ) & 0xfff0;
+ //-------------------------
+ info.fir_base=FirIOBase;
+ info.irq=FirIRQ;
+ info.dma=FirDRQ1;
+ info.dma2=FirDRQ0;
+ rc = via_ircc_open(pcidev, &info, 0x3096);
+ } else
+ rc = -ENODEV; //IR not turned on !!!!!
+ }//Not VT1211
+
+ pr_debug("%s(): End - rc = %d\n", __func__, rc);
+ return rc;
+}
+
+static void __exit via_ircc_cleanup(void)
+{
+ /* Cleanup all instances of the driver */
+ pci_unregister_driver (&via_driver);
+}
+
+static const struct net_device_ops via_ircc_sir_ops = {
+ .ndo_start_xmit = via_ircc_hard_xmit_sir,
+ .ndo_open = via_ircc_net_open,
+ .ndo_stop = via_ircc_net_close,
+ .ndo_do_ioctl = via_ircc_net_ioctl,
+};
+static const struct net_device_ops via_ircc_fir_ops = {
+ .ndo_start_xmit = via_ircc_hard_xmit_fir,
+ .ndo_open = via_ircc_net_open,
+ .ndo_stop = via_ircc_net_close,
+ .ndo_do_ioctl = via_ircc_net_ioctl,
+};
+
+/*
+ * Function via_ircc_open(pdev, iobase, irq)
+ *
+ * Open driver instance
+ *
+ */
+static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
+{
+ struct net_device *dev;
+ struct via_ircc_cb *self;
+ int err;
+
+ /* Allocate new instance of the driver */
+ dev = alloc_irdadev(sizeof(struct via_ircc_cb));
+ if (dev == NULL)
+ return -ENOMEM;
+
+ self = netdev_priv(dev);
+ self->netdev = dev;
+ spin_lock_init(&self->lock);
+
+ pci_set_drvdata(pdev, self);
+
+ /* Initialize Resource */
+ self->io.cfg_base = info->cfg_base;
+ self->io.fir_base = info->fir_base;
+ self->io.irq = info->irq;
+ self->io.fir_ext = CHIP_IO_EXTENT;
+ self->io.dma = info->dma;
+ self->io.dma2 = info->dma2;
+ self->io.fifo_size = 32;
+ self->chip_id = id;
+ self->st_fifo.len = 0;
+ self->RxDataReady = 0;
+
+ /* Reserve the ioports that we need */
+ if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
+ pr_debug("%s(), can't get iobase of 0x%03x\n",
+ __func__, self->io.fir_base);
+ err = -ENODEV;
+ goto err_out1;
+ }
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+ /* Check if user has supplied the dongle id or not */
+ if (!dongle_id)
+ dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
+ self->io.dongle_id = dongle_id;
+
+ /* The only value we must override is the baudrate */
+ /* Maximum speeds and capabilities are dongle-dependent. */
+ switch( self->io.dongle_id ){
+ case 0x0d:
+ self->qos.baud_rate.bits =
+ IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
+ IR_576000 | IR_1152000 | (IR_4000000 << 8);
+ break;
+ default:
+ self->qos.baud_rate.bits =
+ IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
+ break;
+ }
+
+ /* Following was used for testing:
+ *
+ * self->qos.baud_rate.bits = IR_9600;
+ *
+ * It is no good, as it prohibits (error-prone) speed-changes.
+ */
+
+ self->qos.min_turn_time.bits = qos_mtt_bits;
+ irda_qos_bits_to_value(&self->qos);
+
+ /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
+ self->rx_buff.truesize = 14384 + 2048;
+ self->tx_buff.truesize = 14384 + 2048;
+
+ /* Allocate memory if needed */
+ self->rx_buff.head =
+ dma_zalloc_coherent(&pdev->dev, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
+ if (self->rx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto err_out2;
+ }
+
+ self->tx_buff.head =
+ dma_zalloc_coherent(&pdev->dev, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
+ if (self->tx_buff.head == NULL) {
+ err = -ENOMEM;
+ goto err_out3;
+ }
+
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->tx_buff.data = self->tx_buff.head;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* Reset Tx queue info */
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+
+ /* Override the network functions we need to use */
+ dev->netdev_ops = &via_ircc_sir_ops;
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_out4;
+
+ net_info_ratelimited("IrDA: Registered device %s (via-ircc)\n",
+ dev->name);
+
+ /* Initialise the hardware..
+ */
+ self->io.speed = 9600;
+ via_hw_init(self);
+ return 0;
+ err_out4:
+ dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+ err_out3:
+ dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+ err_out2:
+ release_region(self->io.fir_base, self->io.fir_ext);
+ err_out1:
+ free_netdev(dev);
+ return err;
+}
+
+/*
+ * Function via_remove_one(pdev)
+ *
+ * Close driver instance
+ *
+ */
+static void via_remove_one(struct pci_dev *pdev)
+{
+ struct via_ircc_cb *self = pci_get_drvdata(pdev);
+ int iobase;
+
+ iobase = self->io.fir_base;
+
+ ResetChip(iobase, 5); //hardware reset.
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ /* Release the PORT that this driver is using */
+ pr_debug("%s(), Releasing Region %03x\n",
+ __func__, self->io.fir_base);
+ release_region(self->io.fir_base, self->io.fir_ext);
+ if (self->tx_buff.head)
+ dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+ if (self->rx_buff.head)
+ dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+
+ free_netdev(self->netdev);
+
+ pci_disable_device(pdev);
+}
+
+/*
+ * Function via_hw_init(self)
+ *
+ * Initialize the controller hardware to its 9600 baud SIR defaults.
+ *
+ * Formerly via_ircc_setup
+ */
+static void via_hw_init(struct via_ircc_cb *self)
+{
+ int iobase = self->io.fir_base;
+
+ SetMaxRxPacketSize(iobase, 0x0fff); //set to max:4095
+ // FIFO Init
+ EnRXFIFOReadyInt(iobase, OFF);
+ EnRXFIFOHalfLevelInt(iobase, OFF);
+ EnTXFIFOHalfLevelInt(iobase, OFF);
+ EnTXFIFOUnderrunEOMInt(iobase, ON);
+ EnTXFIFOReadyInt(iobase, OFF);
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, OFF);
+
+ if (ReadLPCReg(0x20) == 0x3c)
+ WriteLPCReg(0xF0, 0); // for VT1211
+ /* Int Init */
+ EnRXSpecInt(iobase, ON);
+
+ /* The following is basically hwreset */
+ /* If this is the case, why not just call hwreset() ? Jean II */
+ ResetChip(iobase, 5);
+ EnableDMA(iobase, OFF);
+ EnableTX(iobase, OFF);
+ EnableRX(iobase, OFF);
+ EnRXDMA(iobase, OFF);
+ EnTXDMA(iobase, OFF);
+ RXStart(iobase, OFF);
+ TXStart(iobase, OFF);
+ InitCard(iobase);
+ CommonInit(iobase);
+ SIRFilter(iobase, ON);
+ SetSIR(iobase, ON);
+ CRC16(iobase, ON);
+ EnTXCRC(iobase, 0);
+ WriteReg(iobase, I_ST_CT_0, 0x00);
+ SetBaudRate(iobase, 9600);
+ SetPulseWidth(iobase, 12);
+ SetSendPreambleCount(iobase, 0);
+
+ self->io.speed = 9600;
+ self->st_fifo.len = 0;
+
+ via_ircc_change_dongle_speed(iobase, self->io.speed,
+ self->io.dongle_id);
+
+ WriteReg(iobase, I_ST_CT_0, 0x80);
+}
+
+/*
+ * Function via_ircc_read_dongle_id (void)
+ *
+ * Dongle probing is not supported; warn and fall back to dongle id 9
+ * (IBM31T1100 / Temic TFDS6000/TFDS6500).
+ */
+static int via_ircc_read_dongle_id(int iobase)
+{
+ net_err_ratelimited("via-ircc: dongle probing not supported, please specify dongle_id module parameter\n");
+ return 9; /* Default to IBM */
+}
+
+/*
+ * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
+ * Change the speed of the attached dongle.
+ * Only two types of dongle are currently implemented.
+ */
+static void via_ircc_change_dongle_speed(int iobase, int speed,
+ int dongle_id)
+{
+ u8 mode = 0;
+
+ /* speed is unused, as we use IsSIROn()/IsMIROn() */
+ speed = speed;
+
+ pr_debug("%s(): change_dongle_speed to %d for 0x%x, %d\n",
+ __func__, speed, iobase, dongle_id);
+
+ switch (dongle_id) {
+
+ /* Note: The dongle_id's listed here are derived from
+ * nsc-ircc.c */
+
+ case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
+ UseOneRX(iobase, ON); // use one RX pin RX1,RX2
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, OFF);
+
+ EnRX2(iobase, ON); //sir to rx2
+ EnGPIOtoRX2(iobase, OFF);
+
+ if (IsSIROn(iobase)) { //sir
+ // Mode select Off
+ SlowIRRXLowActive(iobase, ON);
+ udelay(1000);
+ SlowIRRXLowActive(iobase, OFF);
+ } else {
+ if (IsMIROn(iobase)) { //mir
+ // Mode select On
+ SlowIRRXLowActive(iobase, OFF);
+ udelay(20);
+ } else { // fir
+ if (IsFIROn(iobase)) { //fir
+ // Mode select On
+ SlowIRRXLowActive(iobase, OFF);
+ udelay(20);
+ }
+ }
+ }
+ break;
+
+ case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
+ UseOneRX(iobase, ON); //use ONE RX....RX1
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, OFF); // invert RX pin
+
+ EnRX2(iobase, ON);
+ EnGPIOtoRX2(iobase, OFF);
+ if (IsSIROn(iobase)) { //sir
+ // Mode select On
+ SlowIRRXLowActive(iobase, ON);
+ udelay(20);
+ // Mode select Off
+ SlowIRRXLowActive(iobase, OFF);
+ }
+ if (IsMIROn(iobase)) { //mir
+ // Mode select On
+ SlowIRRXLowActive(iobase, OFF);
+ udelay(20);
+ // Mode select Off
+ SlowIRRXLowActive(iobase, ON);
+ } else { // fir
+ if (IsFIROn(iobase)) { //fir
+ // Mode select On
+ SlowIRRXLowActive(iobase, OFF);
+ // TX On
+ WriteTX(iobase, ON);
+ udelay(20);
+ // Mode select OFF
+ SlowIRRXLowActive(iobase, ON);
+ udelay(20);
+ // TX Off
+ WriteTX(iobase, OFF);
+ }
+ }
+ break;
+
+ case 0x0d:
+ UseOneRX(iobase, OFF); // use two RX pin RX1,RX2
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, OFF);
+ SlowIRRXLowActive(iobase, OFF);
+ if (IsSIROn(iobase)) { //sir
+ EnGPIOtoRX2(iobase, OFF);
+ WriteGIO(iobase, OFF);
+ EnRX2(iobase, OFF); //sir to rx2
+ } else { // fir mir
+ EnGPIOtoRX2(iobase, OFF);
+ WriteGIO(iobase, OFF);
+ EnRX2(iobase, OFF); //fir to rx
+ }
+ break;
+
+ case 0x11: /* Temic TFDS4500 */
+
+ pr_debug("%s: Temic TFDS4500: One RX pin, TX normal, RX inverted\n",
+ __func__);
+
+ UseOneRX(iobase, ON); //use ONE RX....RX1
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, ON); // invert RX pin
+
+ EnRX2(iobase, ON); //sir to rx2
+ EnGPIOtoRX2(iobase, OFF);
+
+ if (IsSIROn(iobase)) { //sir
+
+ // Mode select On
+ SlowIRRXLowActive(iobase, ON);
+ udelay(20);
+ // Mode select Off
+ SlowIRRXLowActive(iobase, OFF);
+
+ } else {
+ pr_debug("%s: Warning: TFDS4500 not running in SIR mode !\n",
+ __func__);
+ }
+ break;
+
+ case 0x0ff: /* Vishay */
+ if (IsSIROn(iobase))
+ mode = 0;
+ else if (IsMIROn(iobase))
+ mode = 1;
+ else if (IsFIROn(iobase))
+ mode = 2;
+ else if (IsVFIROn(iobase))
+ mode = 5; //VFIR-16
+ SI_SetMode(iobase, mode);
+ break;
+
+ default:
+ net_err_ratelimited("%s: Error: dongle_id %d unsupported !\n",
+ __func__, dongle_id);
+ }
+}
+
+/*
+ * Function via_ircc_change_speed (self, baud)
+ *
+ * Change the speed of the device
+ *
+ */
+static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
+{
+ struct net_device *dev = self->netdev;
+ u16 iobase;
+ u8 value = 0, bTmp;
+
+ iobase = self->io.fir_base;
+ /* Update accounting for new speed */
+ self->io.speed = speed;
+ pr_debug("%s: change_speed to %d bps.\n", __func__, speed);
+
+ WriteReg(iobase, I_ST_CT_0, 0x0);
+
+ /* Controller mode selection */
+ switch (speed) {
+ case 2400:
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+ value = (115200/speed)-1;
+ SetSIR(iobase, ON);
+ CRC16(iobase, ON);
+ break;
+ case 576000:
+ /* FIXME: this can't be right, as it's the same as 115200,
+ * and 576000 is MIR, not SIR. */
+ value = 0;
+ SetSIR(iobase, ON);
+ CRC16(iobase, ON);
+ break;
+ case 1152000:
+ value = 0;
+ SetMIR(iobase, ON);
+ /* FIXME: CRC ??? */
+ break;
+ case 4000000:
+ value = 0;
+ SetFIR(iobase, ON);
+ SetPulseWidth(iobase, 0);
+ SetSendPreambleCount(iobase, 14);
+ CRC16(iobase, OFF);
+ EnTXCRC(iobase, ON);
+ break;
+ case 16000000:
+ value = 0;
+ SetVFIR(iobase, ON);
+ /* FIXME: CRC ??? */
+ break;
+ default:
+ value = 0;
+ break;
+ }
+
+ /* Set baudrate to 0x19[2..7] */
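+ /* For SIR rates the divisor matches SetBaudRate(): e.g. 9600 baud
+ * gives 115200/9600 - 1 = 11, written into I_CF_H_1 (0x19) bits [2..7].
+ */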
+ bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
+ bTmp |= value << 2;
+ WriteReg(iobase, I_CF_H_1, bTmp);
+
+ /* Some dongles may need to be informed about speed changes. */
+ via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);
+
+ /* Set FIFO size to 64 */
+ SetFIFO(iobase, 64);
+
+ /* Enable IR */
+ WriteReg(iobase, I_ST_CT_0, 0x80);
+
+ // EnTXFIFOHalfLevelInt(iobase,ON);
+
+ /* Enable some interrupts so we can receive frames */
+ //EnAllInt(iobase,ON);
+
+ if (IsSIROn(iobase)) {
+ SIRFilter(iobase, ON);
+ SIRRecvAny(iobase, ON);
+ } else {
+ SIRFilter(iobase, OFF);
+ SIRRecvAny(iobase, OFF);
+ }
+
+ if (speed > 115200) {
+ /* Install FIR xmit handler */
+ dev->netdev_ops = &via_ircc_fir_ops;
+ via_ircc_dma_receive(self);
+ } else {
+ /* Install SIR xmit handler */
+ dev->netdev_ops = &via_ircc_sir_ops;
+ }
+ netif_wake_queue(dev);
+}
+
+/*
+ * Function via_ircc_hard_xmit (skb, dev)
+ *
+ * Transmit the frame!
+ *
+ */
+static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct via_ircc_cb *self;
+ unsigned long flags;
+ u16 iobase;
+ __u32 speed;
+
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
+ iobase = self->io.fir_base;
+
+ netif_stop_queue(dev);
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ via_ircc_change_speed(self, speed);
+ netif_trans_update(dev);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ } else
+ self->new_speed = speed;
+ }
+ InitCard(iobase);
+ CommonInit(iobase);
+ SIRFilter(iobase, ON);
+ SetSIR(iobase, ON);
+ CRC16(iobase, ON);
+ EnTXCRC(iobase, 0);
+ WriteReg(iobase, I_ST_CT_0, 0x00);
+
+ spin_lock_irqsave(&self->lock, flags);
+ self->tx_buff.data = self->tx_buff.head;
+ self->tx_buff.len =
+ async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ dev->stats.tx_bytes += self->tx_buff.len;
+ /* Send this frame with old speed */
+ SetBaudRate(iobase, self->io.speed);
+ SetPulseWidth(iobase, 12);
+ SetSendPreambleCount(iobase, 0);
+ WriteReg(iobase, I_ST_CT_0, 0x80);
+
+ EnableTX(iobase, ON);
+ EnableRX(iobase, OFF);
+
+ ResetChip(iobase, 0);
+ ResetChip(iobase, 1);
+ ResetChip(iobase, 2);
+ ResetChip(iobase, 3);
+ ResetChip(iobase, 4);
+
+ EnAllInt(iobase, ON);
+ EnTXDMA(iobase, ON);
+ EnRXDMA(iobase, OFF);
+
+ irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
+ DMA_TX_MODE);
+
+ SetSendByte(iobase, self->tx_buff.len);
+ RXStart(iobase, OFF);
+ TXStart(iobase, ON);
+
+ netif_trans_update(dev);
+ spin_unlock_irqrestore(&self->lock, flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct via_ircc_cb *self;
+ u16 iobase;
+ __u32 speed;
+ unsigned long flags;
+
+ self = netdev_priv(dev);
+ iobase = self->io.fir_base;
+
+ if (self->st_fifo.len)
+ return NETDEV_TX_OK;
+ if (self->chip_id == 0x3076)
+ iodelay(1500);
+ else
+ udelay(1500);
+ netif_stop_queue(dev);
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ if (!skb->len) {
+ via_ircc_change_speed(self, speed);
+ netif_trans_update(dev);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ } else
+ self->new_speed = speed;
+ }
+ spin_lock_irqsave(&self->lock, flags);
+ self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
+ self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
+
+ self->tx_fifo.tail += skb->len;
+ dev->stats.tx_bytes += skb->len;
+ skb_copy_from_linear_data(skb,
+ self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
+ self->tx_fifo.len++;
+ self->tx_fifo.free++;
+//F01 if (self->tx_fifo.len == 1) {
+ via_ircc_dma_xmit(self, iobase);
+//F01 }
+//F01 if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
+ netif_trans_update(dev);
+ dev_kfree_skb(skb);
+ spin_unlock_irqrestore(&self->lock, flags);
+ return NETDEV_TX_OK;
+
+}
+
+static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
+{
+ EnTXDMA(iobase, OFF);
+ self->io.direction = IO_XMIT;
+ EnPhys(iobase, ON);
+ EnableTX(iobase, ON);
+ EnableRX(iobase, OFF);
+ ResetChip(iobase, 0);
+ ResetChip(iobase, 1);
+ ResetChip(iobase, 2);
+ ResetChip(iobase, 3);
+ ResetChip(iobase, 4);
+ EnAllInt(iobase, ON);
+ EnTXDMA(iobase, ON);
+ EnRXDMA(iobase, OFF);
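+ /* Translate the queue entry's virtual address into a bus address:
+ * take its offset within the coherent tx buffer and add the
+ * buffer's DMA handle.
+ */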
+ irda_setup_dma(self->io.dma,
+ ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
+ self->tx_buff.head) + self->tx_buff_dma,
+ self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
+ pr_debug("%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
+ __func__, self->tx_fifo.ptr,
+ self->tx_fifo.queue[self->tx_fifo.ptr].len,
+ self->tx_fifo.len);
+
+ SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
+ RXStart(iobase, OFF);
+ TXStart(iobase, ON);
+ return 0;
+
+}
+
+/*
+ * Function via_ircc_dma_xmit_complete (self)
+ *
+ * The transfer of a frame is finished. This function will only be called
+ * by the interrupt handler.
+ *
+ */
+static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
+{
+ int iobase;
+ u8 Tx_status;
+
+ iobase = self->io.fir_base;
+ /* Disable DMA */
+// DisableDmaChannel(self->io.dma);
+ /* Check for underrun! */
+ /* Clear bit, by writing 1 into it */
+ Tx_status = GetTXStatus(iobase);
+ if (Tx_status & 0x08) {
+ self->netdev->stats.tx_errors++;
+ self->netdev->stats.tx_fifo_errors++;
+ hwreset(self);
+ /* how to clear underrun? */
+ } else {
+ self->netdev->stats.tx_packets++;
+ ResetChip(iobase, 3);
+ ResetChip(iobase, 4);
+ }
+ /* Check if we need to change the speed */
+ if (self->new_speed) {
+ via_ircc_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ }
+
+ /* Finished with this frame, so prepare for next */
+ if (IsFIROn(iobase)) {
+ if (self->tx_fifo.len) {
+ self->tx_fifo.len--;
+ self->tx_fifo.ptr++;
+ }
+ }
+ pr_debug("%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
+ __func__,
+ self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
+/* F01_S
+ // Any frames to be sent back-to-back?
+ if (self->tx_fifo.len) {
+ // Not finished yet!
+ via_ircc_dma_xmit(self, iobase);
+ ret = FALSE;
+ } else {
+F01_E*/
+ // Reset Tx FIFO info
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+//F01 }
+
+ // Make sure we have room for more frames
+//F01 if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) {
+ // Not busy transmitting anymore
+ // Tell the network layer, that we can accept more frames
+ netif_wake_queue(self->netdev);
+//F01 }
+ return TRUE;
+}
+
+/*
+ * Function via_ircc_dma_receive (self)
+ *
+ * Set up the controller to receive a frame.
+ *
+ */
+static int via_ircc_dma_receive(struct via_ircc_cb *self)
+{
+ int iobase;
+
+ iobase = self->io.fir_base;
+
+ self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
+ self->tx_fifo.tail = self->tx_buff.head;
+ self->RxDataReady = 0;
+ self->io.direction = IO_RECV;
+ self->rx_buff.data = self->rx_buff.head;
+ self->st_fifo.len = self->st_fifo.pending_bytes = 0;
+ self->st_fifo.tail = self->st_fifo.head = 0;
+
+ EnPhys(iobase, ON);
+ EnableTX(iobase, OFF);
+ EnableRX(iobase, ON);
+
+ ResetChip(iobase, 0);
+ ResetChip(iobase, 1);
+ ResetChip(iobase, 2);
+ ResetChip(iobase, 3);
+ ResetChip(iobase, 4);
+
+ EnAllInt(iobase, ON);
+ EnTXDMA(iobase, OFF);
+ EnRXDMA(iobase, ON);
+ irda_setup_dma(self->io.dma2, self->rx_buff_dma,
+ self->rx_buff.truesize, DMA_RX_MODE);
+ TXStart(iobase, OFF);
+ RXStart(iobase, ON);
+
+ return 0;
+}
+
+/*
+ * Function via_ircc_dma_receive_complete (self)
+ *
+ * The controller has finished receiving frames;
+ * this routine is called by the ISR.
+ *
+ */
+static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
+ int iobase)
+{
+ struct st_fifo *st_fifo;
+ struct sk_buff *skb;
+ int len, i;
+ u8 status = 0;
+
+ iobase = self->io.fir_base;
+ st_fifo = &self->st_fifo;
+
+ if (self->io.speed < 4000000) { //Speed below FIR
+ len = GetRecvByte(iobase, self);
+ skb = dev_alloc_skb(len + 1);
+ if (skb == NULL)
+ return FALSE;
+ // Make sure IP header gets aligned
+ skb_reserve(skb, 1);
+ skb_put(skb, len - 2);
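+ /* For the 0x3076 chip the received bytes sit at every other offset
+ * in the DMA buffer, so only the even positions are copied; the
+ * 0x3096 copies them 1:1.
+ */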
+ if (self->chip_id == 0x3076) {
+ for (i = 0; i < len - 2; i++)
+ skb->data[i] = self->rx_buff.data[i * 2];
+ } else {
+ if (self->chip_id == 0x3096) {
+ for (i = 0; i < len - 2; i++)
+ skb->data[i] =
+ self->rx_buff.data[i];
+ }
+ }
+ // Move to next frame
+ self->rx_buff.data += len;
+ self->netdev->stats.rx_bytes += len;
+ self->netdev->stats.rx_packets++;
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ return TRUE;
+ }
+
+ else { //FIR mode
+ len = GetRecvByte(iobase, self);
+ if (len == 0)
+ return TRUE; //interrupt only, data may have been moved by the Rx timer handler
+ if (((len - 4) < 2) || ((len - 4) > 2048)) {
+ pr_debug("%s(): Trouble:len=%x,CurCount=%x,LastCount=%x\n",
+ __func__, len, RxCurCount(iobase, self),
+ self->RxLastCount);
+ hwreset(self);
+ return FALSE;
+ }
+ pr_debug("%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
+ __func__,
+ st_fifo->len, len - 4, RxCurCount(iobase, self));
+
+ st_fifo->entries[st_fifo->tail].status = status;
+ st_fifo->entries[st_fifo->tail].len = len;
+ st_fifo->pending_bytes += len;
+ st_fifo->tail++;
+ st_fifo->len++;
+ if (st_fifo->tail > MAX_RX_WINDOW)
+ st_fifo->tail = 0;
+ self->RxDataReady = 0;
+
+ // Up to MAX_RX_WINDOW packets may have been received by
+ // receive_complete before the timer IRQ fires
+/* F01_S
+ if (st_fifo->len < (MAX_RX_WINDOW+2 )) {
+ RXStart(iobase,ON);
+ SetTimer(iobase,4);
+ }
+ else {
+F01_E */
+ EnableRX(iobase, OFF);
+ EnRXDMA(iobase, OFF);
+ RXStart(iobase, OFF);
+//F01_S
+ // Take the next entry from the status fifo
+ if (st_fifo->head > MAX_RX_WINDOW)
+ st_fifo->head = 0;
+ status = st_fifo->entries[st_fifo->head].status;
+ len = st_fifo->entries[st_fifo->head].len;
+ st_fifo->head++;
+ st_fifo->len--;
+
+ skb = dev_alloc_skb(len + 1 - 4);
+ /*
+ * if frame size, data ptr, or skb ptr are wrong, then get next
+ * entry.
+ */
+ if ((skb == NULL) || (skb->data == NULL) ||
+ (self->rx_buff.data == NULL) || (len < 6)) {
+ self->netdev->stats.rx_dropped++;
+ kfree_skb(skb);
+ return TRUE;
+ }
+ skb_reserve(skb, 1);
+ skb_put(skb, len - 4);
+
+ skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
+ pr_debug("%s(): len=%x.rx_buff=%p\n", __func__,
+ len - 4, self->rx_buff.data);
+
+ // Move to next frame
+ self->rx_buff.data += len;
+ self->netdev->stats.rx_bytes += len;
+ self->netdev->stats.rx_packets++;
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+
+//F01_E
+ } //FIR
+ return TRUE;
+
+}
+
+/*
+ * If a frame was received but no interrupt was raised, use this routine to upload it.
+ */
+static int upload_rxdata(struct via_ircc_cb *self, int iobase)
+{
+ struct sk_buff *skb;
+ int len;
+ struct st_fifo *st_fifo;
+ st_fifo = &self->st_fifo;
+
+ len = GetRecvByte(iobase, self);
+
+ pr_debug("%s(): len=%x\n", __func__, len);
+
+ if ((len - 4) < 2) {
+ self->netdev->stats.rx_dropped++;
+ return FALSE;
+ }
+
+ skb = dev_alloc_skb(len + 1);
+ if (skb == NULL) {
+ self->netdev->stats.rx_dropped++;
+ return FALSE;
+ }
+ skb_reserve(skb, 1);
+ skb_put(skb, len - 4 + 1);
+ skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
+ st_fifo->tail++;
+ st_fifo->len++;
+ if (st_fifo->tail > MAX_RX_WINDOW)
+ st_fifo->tail = 0;
+ // Move to next frame
+ self->rx_buff.data += len;
+ self->netdev->stats.rx_bytes += len;
+ self->netdev->stats.rx_packets++;
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
+ RXStart(iobase, ON);
+ } else {
+ EnableRX(iobase, OFF);
+ EnRXDMA(iobase, OFF);
+ RXStart(iobase, OFF);
+ }
+ return TRUE;
+}
+
+/*
+ * Implements back-to-back receive; use this routine to upload the data.
+ */
+
+static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
+{
+ struct st_fifo *st_fifo;
+ struct sk_buff *skb;
+ int len;
+ u8 status;
+
+ st_fifo = &self->st_fifo;
+
+ if (CkRxRecv(iobase, self)) {
+ // if still receiving, return without uploading the frame
+ self->RetryCount = 0;
+ SetTimer(iobase, 20);
+ self->RxDataReady++;
+ return FALSE;
+ } else
+ self->RetryCount++;
+
+ if ((self->RetryCount >= 1) ||
+ ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
+ (st_fifo->len >= (MAX_RX_WINDOW))) {
+ while (st_fifo->len > 0) { //upload frame
+ // Take the next entry from the status fifo
+ if (st_fifo->head > MAX_RX_WINDOW)
+ st_fifo->head = 0;
+ status = st_fifo->entries[st_fifo->head].status;
+ len = st_fifo->entries[st_fifo->head].len;
+ st_fifo->head++;
+ st_fifo->len--;
+
+ skb = dev_alloc_skb(len + 1 - 4);
+ /*
+ * if frame size, data ptr, or skb ptr are wrong,
+ * then get next entry.
+ */
+ if ((skb == NULL) || (skb->data == NULL) ||
+ (self->rx_buff.data == NULL) || (len < 6)) {
+ self->netdev->stats.rx_dropped++;
+ continue;
+ }
+ skb_reserve(skb, 1);
+ skb_put(skb, len - 4);
+ skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
+
+ pr_debug("%s(): len=%x.head=%x\n", __func__,
+ len - 4, st_fifo->head);
+
+ // Move to next frame
+ self->rx_buff.data += len;
+ self->netdev->stats.rx_bytes += len;
+ self->netdev->stats.rx_packets++;
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ } //while
+ self->RetryCount = 0;
+
+ pr_debug("%s(): End of upload HostStatus=%x,RxStatus=%x\n",
+ __func__, GetHostStatus(iobase), GetRXStatus(iobase));
+
+ /*
+ * if a frame has completed reception by the time this routine runs,
+ * upload it.
+ */
+ if ((GetRXStatus(iobase) & 0x10) &&
+ (RxCurCount(iobase, self) != self->RxLastCount)) {
+ upload_rxdata(self, iobase);
+ if (irda_device_txqueue_empty(self->netdev))
+ via_ircc_dma_receive(self);
+ }
+ } // timer detect complete
+ else
+ SetTimer(iobase, 4);
+ return TRUE;
+
+}
+
+
+
+/*
+ * Function via_ircc_interrupt (irq, dev_id)
+ *
+ * An interrupt from the chip has arrived. Time to do some work
+ *
+ */
+static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct via_ircc_cb *self = netdev_priv(dev);
+ int iobase;
+ u8 iHostIntType, iRxIntType, iTxIntType;
+
+ iobase = self->io.fir_base;
+ spin_lock(&self->lock);
+ iHostIntType = GetHostStatus(iobase);
+
+ pr_debug("%s(): iHostIntType %02x: %s %s %s %02x\n",
+ __func__, iHostIntType,
+ (iHostIntType & 0x40) ? "Timer" : "",
+ (iHostIntType & 0x20) ? "Tx" : "",
+ (iHostIntType & 0x10) ? "Rx" : "",
+ (iHostIntType & 0x0e) >> 1);
+
+ if ((iHostIntType & 0x40) != 0) { //Timer Event
+ self->EventFlag.TimeOut++;
+ ClearTimerInt(iobase, 1);
+ if (self->io.direction == IO_XMIT) {
+ via_ircc_dma_xmit(self, iobase);
+ }
+ if (self->io.direction == IO_RECV) {
+ /*
+ * a ready frame has been held too long; reset the chip.
+ */
+ if (self->RxDataReady > 30) {
+ hwreset(self);
+ if (irda_device_txqueue_empty(self->netdev)) {
+ via_ircc_dma_receive(self);
+ }
+ } else { // call this to upload frame.
+ RxTimerHandler(self, iobase);
+ }
+ } //RECV
+ } //Timer Event
+ if ((iHostIntType & 0x20) != 0) { //Tx Event
+ iTxIntType = GetTXStatus(iobase);
+
+ pr_debug("%s(): iTxIntType %02x: %s %s %s %s\n",
+ __func__, iTxIntType,
+ (iTxIntType & 0x08) ? "FIFO underr." : "",
+ (iTxIntType & 0x04) ? "EOM" : "",
+ (iTxIntType & 0x02) ? "FIFO ready" : "",
+ (iTxIntType & 0x01) ? "Early EOM" : "");
+
+ if (iTxIntType & 0x4) {
+ self->EventFlag.EOMessage++; // read and will auto clean
+ if (via_ircc_dma_xmit_complete(self)) {
+ if (irda_device_txqueue_empty
+ (self->netdev)) {
+ via_ircc_dma_receive(self);
+ }
+ } else {
+ self->EventFlag.Unknown++;
+ }
+ } //EOP
+ } //Tx Event
+ //----------------------------------------
+ if ((iHostIntType & 0x10) != 0) { //Rx Event
+ /* Check if DMA has finished */
+ iRxIntType = GetRXStatus(iobase);
+
+ pr_debug("%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
+ __func__, iRxIntType,
+ (iRxIntType & 0x80) ? "PHY err." : "",
+ (iRxIntType & 0x40) ? "CRC err" : "",
+ (iRxIntType & 0x20) ? "FIFO overr." : "",
+ (iRxIntType & 0x10) ? "EOF" : "",
+ (iRxIntType & 0x08) ? "RxData" : "",
+ (iRxIntType & 0x02) ? "RxMaxLen" : "",
+ (iRxIntType & 0x01) ? "SIR bad" : "");
+ if (!iRxIntType)
+ pr_debug("%s(): RxIRQ =0\n", __func__);
+
+ if (iRxIntType & 0x10) {
+ if (via_ircc_dma_receive_complete(self, iobase)) {
+//F01 if(!(IsFIROn(iobase))) via_ircc_dma_receive(self);
+ via_ircc_dma_receive(self);
+ }
+ } // No ERR
+ else { //ERR
+ pr_debug("%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
+ __func__, iRxIntType, iHostIntType,
+ RxCurCount(iobase, self), self->RxLastCount);
+
+ if (iRxIntType & 0x20) { //FIFO OverRun ERR
+ ResetChip(iobase, 0);
+ ResetChip(iobase, 1);
+ } else { //PHY,CRC ERR
+
+ if (iRxIntType != 0x08)
+ hwreset(self); //F01
+ }
+ via_ircc_dma_receive(self);
+ } //ERR
+
+ } //Rx Event
+ spin_unlock(&self->lock);
+ return IRQ_RETVAL(iHostIntType);
+}
+
+static void hwreset(struct via_ircc_cb *self)
+{
+ int iobase;
+ iobase = self->io.fir_base;
+
+ ResetChip(iobase, 5);
+ EnableDMA(iobase, OFF);
+ EnableTX(iobase, OFF);
+ EnableRX(iobase, OFF);
+ EnRXDMA(iobase, OFF);
+ EnTXDMA(iobase, OFF);
+ RXStart(iobase, OFF);
+ TXStart(iobase, OFF);
+ InitCard(iobase);
+ CommonInit(iobase);
+ SIRFilter(iobase, ON);
+ SetSIR(iobase, ON);
+ CRC16(iobase, ON);
+ EnTXCRC(iobase, 0);
+ WriteReg(iobase, I_ST_CT_0, 0x00);
+ SetBaudRate(iobase, 9600);
+ SetPulseWidth(iobase, 12);
+ SetSendPreambleCount(iobase, 0);
+ WriteReg(iobase, I_ST_CT_0, 0x80);
+
+ /* Restore speed. */
+ via_ircc_change_speed(self, self->io.speed);
+
+ self->st_fifo.len = 0;
+}
+
+/*
+ * Function via_ircc_is_receiving (self)
+ *
+ * Return TRUE if we are currently receiving a frame.
+ *
+ */
+static int via_ircc_is_receiving(struct via_ircc_cb *self)
+{
+ int status = FALSE;
+ int iobase;
+
+ IRDA_ASSERT(self != NULL, return FALSE;);
+
+ iobase = self->io.fir_base;
+ if (CkRxRecv(iobase, self))
+ status = TRUE;
+
+ pr_debug("%s(): status=%x....\n", __func__, status);
+
+ return status;
+}
+
+
+/*
+ * Function via_ircc_net_open (dev)
+ *
+ * Start the device
+ *
+ */
+static int via_ircc_net_open(struct net_device *dev)
+{
+ struct via_ircc_cb *self;
+ int iobase;
+ char hwname[32];
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = netdev_priv(dev);
+ dev->stats.rx_packets = 0;
+ IRDA_ASSERT(self != NULL, return 0;);
+ iobase = self->io.fir_base;
+ if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
+ net_warn_ratelimited("%s, unable to allocate irq=%d\n",
+ driver_name, self->io.irq);
+ return -EAGAIN;
+ }
+ /*
+ * Always allocate the DMA channel after the IRQ, and clean up on
+ * failure.
+ */
+ if (request_dma(self->io.dma, dev->name)) {
+ net_warn_ratelimited("%s, unable to allocate dma=%d\n",
+ driver_name, self->io.dma);
+ free_irq(self->io.irq, dev);
+ return -EAGAIN;
+ }
+ if (self->io.dma2 != self->io.dma) {
+ if (request_dma(self->io.dma2, dev->name)) {
+ net_warn_ratelimited("%s, unable to allocate dma2=%d\n",
+ driver_name, self->io.dma2);
+ free_irq(self->io.irq, dev);
+ free_dma(self->io.dma);
+ return -EAGAIN;
+ }
+ }
+
+
+ /* turn on interrupts */
+ EnAllInt(iobase, ON);
+ EnInternalLoop(iobase, OFF);
+ EnExternalLoop(iobase, OFF);
+
+ /* Prime the receive path so incoming frames can be handled */
+ via_ircc_dma_receive(self);
+
+ /* Ready to play! */
+ netif_start_queue(dev);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ sprintf(hwname, "VIA @ 0x%x", iobase);
+ self->irlap = irlap_open(dev, &self->qos, hwname);
+
+ self->RxLastCount = 0;
+
+ return 0;
+}
+
+/*
+ * Function via_ircc_net_close (dev)
+ *
+ * Stop the device
+ *
+ */
+static int via_ircc_net_close(struct net_device *dev)
+{
+ struct via_ircc_cb *self;
+ int iobase;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return 0;);
+
+ /* Stop device */
+ netif_stop_queue(dev);
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+ iobase = self->io.fir_base;
+ EnTXDMA(iobase, OFF);
+ EnRXDMA(iobase, OFF);
+ DisableDmaChannel(self->io.dma);
+
+ /* Disable interrupts */
+ EnAllInt(iobase, OFF);
+ free_irq(self->io.irq, dev);
+ free_dma(self->io.dma);
+ if (self->io.dma2 != self->io.dma)
+ free_dma(self->io.dma2);
+
+ return 0;
+}
+
+/*
+ * Function via_ircc_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
+ int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct via_ircc_cb *self;
+ unsigned long flags;
+ int ret = 0;
+
+ IRDA_ASSERT(dev != NULL, return -1;);
+ self = netdev_priv(dev);
+ IRDA_ASSERT(self != NULL, return -1;);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
+ cmd);
+ /* Disable interrupts & save flags */
+ spin_lock_irqsave(&self->lock, flags);
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto out;
+ }
+ via_ircc_change_speed(self, irq->ifr_baudrate);
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto out;
+ }
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = via_ircc_is_receiving(self);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+ out:
+ spin_unlock_irqrestore(&self->lock, flags);
+ return ret;
+}
+
+MODULE_AUTHOR("VIA Technologies,inc");
+MODULE_DESCRIPTION("VIA IrDA Device Driver");
+MODULE_LICENSE("GPL");
+
+module_init(via_ircc_init);
+module_exit(via_ircc_cleanup);
diff --git a/drivers/staging/irda/drivers/via-ircc.h b/drivers/staging/irda/drivers/via-ircc.h
new file mode 100644
index 000000000000..ac1525573398
--- /dev/null
+++ b/drivers/staging/irda/drivers/via-ircc.h
@@ -0,0 +1,846 @@
+/*********************************************************************
+ *
+ * Filename: via-ircc.h
+ * Version: 1.0
+ * Description: Driver for the VIA VT8231/VT8233 IrDA chipsets
+ * Author: VIA Technologies, inc
+ * Date : 08/06/2003
+
+Copyright (c) 1998-2003 VIA Technologies, Inc.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; either version 2, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, see <http://www.gnu.org/licenses/>.
+
+ * Comment:
+ * Jul/08/2002 : Rx buffer length should use Rx ring ptr.
+ * Oct/28/2002 : Add SB id for 3147 and 3177.
+ * Jul/09/2002 : Only two kinds of dongle are implemented currently.
+ * Oct/02/2002 : Work on VT8231 and VT8233.
+ * Aug/06/2003 : Change driver format to PCI driver.
+ ********************************************************************/
+#ifndef via_IRCC_H
+#define via_IRCC_H
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+#define MAX_TX_WINDOW 7
+#define MAX_RX_WINDOW 7
+
+struct st_fifo_entry {
+ int status;
+ int len;
+};
+
+struct st_fifo {
+ struct st_fifo_entry entries[MAX_RX_WINDOW + 2];
+ int pending_bytes;
+ int head;
+ int tail;
+ int len;
+};
+
+struct frame_cb {
+ void *start; /* Start of frame in DMA mem */
+ int len; /* Length of frame in DMA mem */
+};
+
+struct tx_fifo {
+ struct frame_cb queue[MAX_TX_WINDOW + 2]; /* Info about frames in queue */
+ int ptr; /* Currently being sent */
+ int len; /* Length of queue */
+ int free; /* Next free slot */
+ void *tail; /* Next free start in DMA mem */
+};
+
+
+struct eventflag // for keeping track of Interrupt Events
+{
+ //--------tx part
+ unsigned char TxFIFOUnderRun;
+ unsigned char EOMessage;
+ unsigned char TxFIFOReady;
+ unsigned char EarlyEOM;
+ //--------rx part
+ unsigned char PHYErr;
+ unsigned char CRCErr;
+ unsigned char RxFIFOOverRun;
+ unsigned char EOPacket;
+ unsigned char RxAvail;
+ unsigned char TooLargePacket;
+ unsigned char SIRBad;
+ //--------unknown
+ unsigned char Unknown;
+ //----------
+ unsigned char TimeOut;
+ unsigned char RxDMATC;
+ unsigned char TxDMATC;
+};
+
+/* Private data for each instance */
+struct via_ircc_cb {
+ struct st_fifo st_fifo; /* Info about received frames */
+ struct tx_fifo tx_fifo; /* Info about frames to be transmitted */
+
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+
+ struct irlap_cb *irlap; /* The link layer we are bound to */
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ chipio_t io; /* IrDA controller information */
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+ dma_addr_t tx_buff_dma;
+ dma_addr_t rx_buff_dma;
+
+ __u8 ier; /* Interrupt enable register */
+
+ spinlock_t lock; /* For serializing operations */
+
+ __u32 flags; /* Interface flags */
+ __u32 new_speed;
+ int index; /* Instance index */
+
+ struct eventflag EventFlag;
+ unsigned int chip_id; /* to remember chip id */
+ unsigned int RetryCount;
+ unsigned int RxDataReady;
+ unsigned int RxLastCount;
+};
+
+
+//---------I=Infrared, H=Host, M=Misc, T=Tx, R=Rx, ST=Status,
+// CF=Config, CT=Control, L=Low, H=High, C=Count
+#define I_CF_L_0 0x10
+#define I_CF_H_0 0x11
+#define I_SIR_BOF 0x12
+#define I_SIR_EOF 0x13
+#define I_ST_CT_0 0x15
+#define I_ST_L_1 0x16
+#define I_ST_H_1 0x17
+#define I_CF_L_1 0x18
+#define I_CF_H_1 0x19
+#define I_CF_L_2 0x1a
+#define I_CF_H_2 0x1b
+#define I_CF_3 0x1e
+#define H_CT 0x20
+#define H_ST 0x21
+#define M_CT 0x22
+#define TX_CT_1 0x23
+#define TX_CT_2 0x24
+#define TX_ST 0x25
+#define RX_CT 0x26
+#define RX_ST 0x27
+#define RESET 0x28
+#define P_ADDR 0x29
+#define RX_C_L 0x2a
+#define RX_C_H 0x2b
+#define RX_P_L 0x2c
+#define RX_P_H 0x2d
+#define TX_C_L 0x2e
+#define TX_C_H 0x2f
+#define TIMER 0x32
+#define I_CF_4 0x33
+#define I_T_C_L 0x34
+#define I_T_C_H 0x35
+#define VERSION 0x3f
+//-------------------------------
+#define StartAddr 0x10 // the first register address
+#define EndAddr 0x3f // the last register address
+#define GetBit(val,bit) val = (unsigned char) ((val>>bit) & 0x1)
+ // Returns the bit
+#define SetBit(val,bit) val= (unsigned char ) (val | (0x1 << bit))
+ // Sets bit to 1
+#define ResetBit(val,bit) val= (unsigned char ) (val & ~(0x1 << bit))
+ // Sets bit to 0
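+ // Note: these macros assign back into their first argument; the whole
+ // expression also evaluates to that new value, which is how
+ // CheckRegBit() below returns the extracted bit.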
+
+#define OFF 0
+#define ON 1
+#define DMA_TX_MODE 0x08
+#define DMA_RX_MODE 0x04
+
+#define DMA1 0
+#define DMA2 0xc0
+#define MASK1 DMA1+0x0a
+#define MASK2 DMA2+0x14
+
+#define Clk_bit 0x40
+#define Tx_bit 0x01
+#define Rd_Valid 0x08
+#define RxBit 0x08
+
+static void DisableDmaChannel(unsigned int channel)
+{
+ switch (channel) { // 8 Bit DMA channels DMAC1
+ case 0:
+ outb(4, MASK1); //mask channel 0
+ break;
+ case 1:
+ outb(5, MASK1); //Mask channel 1
+ break;
+ case 2:
+ outb(6, MASK1); //Mask channel 2
+ break;
+ case 3:
+ outb(7, MASK1); //Mask channel 3
+ break;
+ case 5:
+ outb(5, MASK2); //Mask channel 5
+ break;
+ case 6:
+ outb(6, MASK2); //Mask channel 6
+ break;
+ case 7:
+ outb(7, MASK2); //Mask channel 7
+ break;
+ default:
+ break;
+ }
+}
+
+static unsigned char ReadLPCReg(int iRegNum)
+{
+ unsigned char iVal;
+
+ outb(0x87, 0x2e);
+ outb(0x87, 0x2e);
+ outb(iRegNum, 0x2e);
+ iVal = inb(0x2f);
+ outb(0xaa, 0x2e);
+
+ return iVal;
+}
+
+static void WriteLPCReg(int iRegNum, unsigned char iVal)
+{
+
+ outb(0x87, 0x2e);
+ outb(0x87, 0x2e);
+ outb(iRegNum, 0x2e);
+ outb(iVal, 0x2f);
+ outb(0xAA, 0x2e);
+}
+
+static __u8 ReadReg(unsigned int BaseAddr, int iRegNum)
+{
+ return (__u8) inb(BaseAddr + iRegNum);
+}
+
+static void WriteReg(unsigned int BaseAddr, int iRegNum, unsigned char iVal)
+{
+ outb(iVal, BaseAddr + iRegNum);
+}
+
+static int WriteRegBit(unsigned int BaseAddr, unsigned char RegNum,
+ unsigned char BitPos, unsigned char value)
+{
+ __u8 Rtemp, Wtemp;
+
+ if (BitPos > 7) {
+ return -1;
+ }
+ if ((RegNum < StartAddr) || (RegNum > EndAddr))
+ return -1;
+ Rtemp = ReadReg(BaseAddr, RegNum);
+ if (value == 0)
+ Wtemp = ResetBit(Rtemp, BitPos);
+ else {
+ if (value == 1)
+ Wtemp = SetBit(Rtemp, BitPos);
+ else
+ return -1;
+ }
+ WriteReg(BaseAddr, RegNum, Wtemp);
+ return 0;
+}
+
+static __u8 CheckRegBit(unsigned int BaseAddr, unsigned char RegNum,
+ unsigned char BitPos)
+{
+ __u8 temp;
+
+ if (BitPos > 7)
+ return 0xff;
+ if ((RegNum < StartAddr) || (RegNum > EndAddr)) {
+// printf("what is the register %x!\n",RegNum);
+ }
+ temp = ReadReg(BaseAddr, RegNum);
+ return GetBit(temp, BitPos);
+}
+
+static void SetMaxRxPacketSize(__u16 iobase, __u16 size)
+{
+ __u16 low, high;
+ if ((size & 0xe000) == 0) {
+ low = size & 0x00ff;
+ high = (size & 0x1f00) >> 8;
+ WriteReg(iobase, I_CF_L_2, low);
+ WriteReg(iobase, I_CF_H_2, high);
+
+ }
+
+}
+
+//for both Rx and Tx
+
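+// The FIFO threshold is encoded in register 0x11 (I_CF_H_0) bits 0 and 7:
+// bit0=0/bit7=1 selects 128, bit0=1/bit7=0 selects 32, and bit0=0/bit7=0
+// (also the default fallback) selects 64.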
+static void SetFIFO(__u16 iobase, __u16 value)
+{
+ switch (value) {
+ case 128:
+ WriteRegBit(iobase, 0x11, 0, 0);
+ WriteRegBit(iobase, 0x11, 7, 1);
+ break;
+ case 64:
+ WriteRegBit(iobase, 0x11, 0, 0);
+ WriteRegBit(iobase, 0x11, 7, 0);
+ break;
+ case 32:
+ WriteRegBit(iobase, 0x11, 0, 1);
+ WriteRegBit(iobase, 0x11, 7, 0);
+ break;
+ default:
+ WriteRegBit(iobase, 0x11, 0, 0);
+ WriteRegBit(iobase, 0x11, 7, 0);
+ }
+
+}
+
+#define CRC16(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,7,val) //0 for 32 CRC
+/*
+#define SetVFIR(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,5,val)
+#define SetFIR(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,6,val)
+#define SetMIR(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,5,val)
+#define SetSIR(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,4,val)
+*/
+#define SIRFilter(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,3,val)
+#define Filter(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,2,val)
+#define InvertTX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,1,val)
+#define InvertRX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,0,val)
+//****************************I_CF_H_0
+#define EnableTX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,4,val)
+#define EnableRX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,3,val)
+#define EnableDMA(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,2,val)
+#define SIRRecvAny(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,1,val)
+#define DiableTrans(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,0,val)
+//***************************I_SIR_BOF,I_SIR_EOF
+#define SetSIRBOF(BaseAddr,val) WriteReg(BaseAddr,I_SIR_BOF,val)
+#define SetSIREOF(BaseAddr,val) WriteReg(BaseAddr,I_SIR_EOF,val)
+#define GetSIRBOF(BaseAddr) ReadReg(BaseAddr,I_SIR_BOF)
+#define GetSIREOF(BaseAddr) ReadReg(BaseAddr,I_SIR_EOF)
+//*******************I_ST_CT_0
+#define EnPhys(BaseAddr,val) WriteRegBit(BaseAddr,I_ST_CT_0,7,val)
+#define IsModeError(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,6) //RO
+#define IsVFIROn(BaseAddr) CheckRegBit(BaseAddr,0x14,0) //RO for VT1211 only
+#define IsFIROn(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,5) //RO
+#define IsMIROn(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,4) //RO
+#define IsSIROn(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,3) //RO
+#define IsEnableTX(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,2) //RO
+#define IsEnableRX(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,1) //RO
+#define Is16CRC(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,0) //RO
+//***************************I_CF_3
+#define DisableAdjacentPulseWidth(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_3,5,val) //1 disable
+#define DisablePulseWidthAdjust(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_3,4,val) //1 disable
+#define UseOneRX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_3,1,val) //0 use two RX
+#define SlowIRRXLowActive(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_3,0,val) //0 show RX high=1 in SIR
+//***************************H_CT
+#define EnAllInt(BaseAddr,val) WriteRegBit(BaseAddr,H_CT,7,val)
+#define TXStart(BaseAddr,val) WriteRegBit(BaseAddr,H_CT,6,val)
+#define RXStart(BaseAddr,val) WriteRegBit(BaseAddr,H_CT,5,val)
+#define ClearRXInt(BaseAddr,val) WriteRegBit(BaseAddr,H_CT,4,val) // 1 clear
+//*****************H_ST
+#define IsRXInt(BaseAddr) CheckRegBit(BaseAddr,H_ST,4)
+#define GetIntIndentify(BaseAddr) ((ReadReg(BaseAddr,H_ST)&0xf1) >>1)
+#define IsHostBusy(BaseAddr) CheckRegBit(BaseAddr,H_ST,0)
+#define GetHostStatus(BaseAddr) ReadReg(BaseAddr,H_ST) //RO
+//**************************M_CT
+#define EnTXDMA(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,7,val)
+#define EnRXDMA(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,6,val)
+#define SwapDMA(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,5,val)
+#define EnInternalLoop(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,4,val)
+#define EnExternalLoop(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,3,val)
+//**************************TX_CT_1
+#define EnTXFIFOHalfLevelInt(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_1,4,val) //half empty int (1 half)
+#define EnTXFIFOUnderrunEOMInt(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_1,5,val)
+#define EnTXFIFOReadyInt(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_1,6,val) //int when reach it threshold (setting by bit 4)
+//**************************TX_CT_2
+#define ForceUnderrun(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,7,val) // force an underrun int
+#define EnTXCRC(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,6,val) //1 for FIR,MIR...0 (not SIR)
+#define ForceBADCRC(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,5,val) //force an bad CRC
+#define SendSIP(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,4,val) //send indication pulse for prevent SIR disturb
+#define ClearEnTX(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,3,val) // opposite to EnTX
+//*****************TX_ST
+#define GetTXStatus(BaseAddr) ReadReg(BaseAddr,TX_ST) //RO
+//**************************RX_CT
+#define EnRXSpecInt(BaseAddr,val) WriteRegBit(BaseAddr,RX_CT,0,val)
+#define EnRXFIFOReadyInt(BaseAddr,val) WriteRegBit(BaseAddr,RX_CT,1,val) //enable int when reach it threshold (setting by bit 7)
+#define EnRXFIFOHalfLevelInt(BaseAddr,val) WriteRegBit(BaseAddr,RX_CT,7,val) //enable int when (1) half full...or (0) just not full
+//*****************RX_ST
+#define GetRXStatus(BaseAddr) ReadReg(BaseAddr,RX_ST) //RO
+//***********************P_ADDR
+#define SetPacketAddr(BaseAddr,addr) WriteReg(BaseAddr,P_ADDR,addr)
+//***********************I_CF_4
+#define EnGPIOtoRX2(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_4,7,val)
+#define EnTimerInt(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_4,1,val)
+#define ClearTimerInt(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_4,0,val)
+//***********************I_T_C_L
+#define WriteGIO(BaseAddr,val) WriteRegBit(BaseAddr,I_T_C_L,7,val)
+#define ReadGIO(BaseAddr) CheckRegBit(BaseAddr,I_T_C_L,7)
+#define ReadRX(BaseAddr) CheckRegBit(BaseAddr,I_T_C_L,3) //RO
+#define WriteTX(BaseAddr,val) WriteRegBit(BaseAddr,I_T_C_L,0,val)
+//***********************I_T_C_H
+#define EnRX2(BaseAddr,val) WriteRegBit(BaseAddr,I_T_C_H,7,val)
+#define ReadRX2(BaseAddr) CheckRegBit(BaseAddr,I_T_C_H,7)
+//**********************Version
+#define GetFIRVersion(BaseAddr) ReadReg(BaseAddr,VERSION)
+
+
+static void SetTimer(__u16 iobase, __u8 count)
+{
+ EnTimerInt(iobase, OFF);
+ WriteReg(iobase, TIMER, count);
+ EnTimerInt(iobase, ON);
+}
+
+
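+/* Only counts that fit the 12-bit TX counter (TX_C_L/TX_C_H) are
+ * programmed; values of 4096 or larger are silently ignored.
+ */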
+static void SetSendByte(__u16 iobase, __u32 count)
+{
+ __u32 low, high;
+
+ if ((count & 0xf000) == 0) {
+ low = count & 0x00ff;
+ high = (count & 0x0f00) >> 8;
+ WriteReg(iobase, TX_C_L, low);
+ WriteReg(iobase, TX_C_H, high);
+ }
+}
+
+static void ResetChip(__u16 iobase, __u8 type)
+{
+ WriteReg(iobase, RESET, type);
+}
+
+static int CkRxRecv(__u16 iobase, struct via_ircc_cb *self)
+{
+ __u8 low, high;
+ __u16 wTmp = 0, wTmp1 = 0, wTmp_new = 0;
+
+ low = ReadReg(iobase, RX_C_L);
+ high = ReadReg(iobase, RX_C_H);
+ wTmp1 = high;
+ wTmp = (wTmp1 << 8) | low;
+ udelay(10);
+ low = ReadReg(iobase, RX_C_L);
+ high = ReadReg(iobase, RX_C_H);
+ wTmp1 = high;
+ wTmp_new = (wTmp1 << 8) | low;
+ if (wTmp_new != wTmp)
+ return 1;
+ else
+ return 0;
+
+}
+
+static __u16 RxCurCount(__u16 iobase, struct via_ircc_cb * self)
+{
+ __u8 low, high;
+ __u16 wTmp = 0, wTmp1 = 0;
+
+ low = ReadReg(iobase, RX_P_L);
+ high = ReadReg(iobase, RX_P_H);
+ wTmp1 = high;
+ wTmp = (wTmp1 << 8) | low;
+ return wTmp;
+}
+
+/* This routine can only be used in receive_complete,
+ * since it updates the last count.
+ */
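+/* Example: the hardware count wraps at 0x8000, so a previous read of
+ * 0x7ff0 followed by a current read of 0x0010 yields
+ * (0x8000 - 0x7ff0) + 0x0010 = 0x20 new bytes.
+ */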
+
+static __u16 GetRecvByte(__u16 iobase, struct via_ircc_cb * self)
+{
+ __u8 low, high;
+ __u16 wTmp, wTmp1, ret;
+
+ low = ReadReg(iobase, RX_P_L);
+ high = ReadReg(iobase, RX_P_H);
+ wTmp1 = high;
+ wTmp = (wTmp1 << 8) | low;
+
+
+ if (wTmp >= self->RxLastCount)
+ ret = wTmp - self->RxLastCount;
+ else
+ ret = (0x8000 - self->RxLastCount) + wTmp;
+ self->RxLastCount = wTmp;
+
+/* RX_P is more actually the RX_C
+ low=ReadReg(iobase,RX_C_L);
+ high=ReadReg(iobase,RX_C_H);
+
+ if(!(high&0xe000)) {
+ temp=(high<<8)+low;
+ return temp;
+ }
+ else return 0;
+*/
+ return ret;
+}
+
+static void Sdelay(__u16 scale)
+{
+ __u8 bTmp;
+ int i, j;
+
+ for (j = 0; j < scale; j++) {
+ for (i = 0; i < 0x20; i++) {
+ bTmp = inb(0xeb);
+ outb(bTmp, 0xeb);
+ }
+ }
+}
+
+static void Tdelay(__u16 scale)
+{
+ __u8 bTmp;
+ int i, j;
+
+ for (j = 0; j < scale; j++) {
+ for (i = 0; i < 0x50; i++) {
+ bTmp = inb(0xeb);
+ outb(bTmp, 0xeb);
+ }
+ }
+}
+
+
+static void ActClk(__u16 iobase, __u8 value)
+{
+ __u8 bTmp;
+ bTmp = ReadReg(iobase, 0x34);
+ if (value)
+ WriteReg(iobase, 0x34, bTmp | Clk_bit);
+ else
+ WriteReg(iobase, 0x34, bTmp & ~Clk_bit);
+}
+
+static void ClkTx(__u16 iobase, __u8 Clk, __u8 Tx)
+{
+ __u8 bTmp;
+
+ bTmp = ReadReg(iobase, 0x34);
+ if (Clk == 0)
+ bTmp &= ~Clk_bit;
+ else {
+ if (Clk == 1)
+ bTmp |= Clk_bit;
+ }
+ WriteReg(iobase, 0x34, bTmp);
+ Sdelay(1);
+ if (Tx == 0)
+ bTmp &= ~Tx_bit;
+ else {
+ if (Tx == 1)
+ bTmp |= Tx_bit;
+ }
+ WriteReg(iobase, 0x34, bTmp);
+}
+
+static void Wr_Byte(__u16 iobase, __u8 data)
+{
+ __u8 bData = data;
+// __u8 btmp;
+ int i;
+
+ ClkTx(iobase, 0, 1);
+
+ Tdelay(2);
+ ActClk(iobase, 1);
+ Tdelay(1);
+
+ for (i = 0; i < 8; i++) { //LDN
+
+ if ((bData >> i) & 0x01) {
+ ClkTx(iobase, 0, 1); //bit data = 1;
+ } else {
+ ClkTx(iobase, 0, 0); //bit data = 0;
+ }
+ Tdelay(2);
+ Sdelay(1);
+ ActClk(iobase, 1); //clk hi
+ Tdelay(1);
+ }
+}
+
+static __u8 Rd_Indx(__u16 iobase, __u8 addr, __u8 index)
+{
+ __u8 data = 0, bTmp, data_bit;
+ int i;
+
+ bTmp = addr | (index << 1) | 0;
+ ClkTx(iobase, 0, 0);
+ Tdelay(2);
+ ActClk(iobase, 1);
+ udelay(1);
+ Wr_Byte(iobase, bTmp);
+ Sdelay(1);
+ ClkTx(iobase, 0, 0);
+ Tdelay(2);
+ for (i = 0; i < 10; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ Tdelay(1);
+ ClkTx(iobase, 0, 1);
+ Tdelay(1);
+ bTmp = ReadReg(iobase, 0x34);
+ if (!(bTmp & Rd_Valid))
+ break;
+ }
+ if (!(bTmp & Rd_Valid)) {
+ for (i = 0; i < 8; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ bTmp = ReadReg(iobase, 0x34);
+ data_bit = 1 << i;
+ if (bTmp & RxBit)
+ data |= data_bit;
+ else
+ data &= ~data_bit;
+ Tdelay(2);
+ }
+ } else {
+ for (i = 0; i < 2; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ Tdelay(2);
+ }
+ bTmp = ReadReg(iobase, 0x34);
+ }
+ for (i = 0; i < 1; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ Tdelay(2);
+ }
+ ClkTx(iobase, 0, 0);
+ Tdelay(1);
+ for (i = 0; i < 3; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ Tdelay(2);
+ }
+ return data;
+}
+
+static void Wr_Indx(__u16 iobase, __u8 addr, __u8 index, __u8 data)
+{
+ int i;
+ __u8 bTmp;
+
+ ClkTx(iobase, 0, 0);
+ udelay(2);
+ ActClk(iobase, 1);
+ udelay(1);
+ bTmp = addr | (index << 1) | 1;
+ Wr_Byte(iobase, bTmp);
+ Wr_Byte(iobase, data);
+ for (i = 0; i < 2; i++) {
+ ClkTx(iobase, 0, 0);
+ Tdelay(2);
+ ActClk(iobase, 1);
+ Tdelay(1);
+ }
+ ActClk(iobase, 0);
+}
+
+static void ResetDongle(__u16 iobase)
+{
+ int i;
+ ClkTx(iobase, 0, 0);
+ Tdelay(1);
+ for (i = 0; i < 30; i++) {
+ ActClk(iobase, 1);
+ Tdelay(1);
+ ActClk(iobase, 0);
+ Tdelay(1);
+ }
+ ActClk(iobase, 0);
+}
+
+static void SetSITmode(__u16 iobase)
+{
+
+ __u8 bTmp;
+
+ bTmp = ReadLPCReg(0x28);
+ WriteLPCReg(0x28, bTmp | 0x10); //select ITMOFF
+ bTmp = ReadReg(iobase, 0x35);
+ WriteReg(iobase, 0x35, bTmp | 0x40); // Driver ITMOFF
+ WriteReg(iobase, 0x28, bTmp | 0x80); // enable All interrupt
+}
+
+static void SI_SetMode(__u16 iobase, int mode)
+{
+ //__u32 dTmp;
+ __u8 bTmp;
+
+ WriteLPCReg(0x28, 0x70); // S/W Reset
+ SetSITmode(iobase);
+ ResetDongle(iobase);
+ udelay(10);
+ Wr_Indx(iobase, 0x40, 0x0, 0x17); //RX ,APEN enable,Normal power
+ Wr_Indx(iobase, 0x40, 0x1, mode); //Set Mode
+ Wr_Indx(iobase, 0x40, 0x2, 0xff); //Set power to FIR VFIR > 1m
+ bTmp = Rd_Indx(iobase, 0x40, 1);
+}
+
+static void InitCard(__u16 iobase)
+{
+ ResetChip(iobase, 5);
+ WriteReg(iobase, I_ST_CT_0, 0x00); // keep the physical layer (EnPhys) off while configuring
+ SetSIRBOF(iobase, 0xc0); // hardware default value
+ SetSIREOF(iobase, 0xc1);
+}
+
+static void CommonInit(__u16 iobase)
+{
+// EnTXCRC(iobase,0);
+ SwapDMA(iobase, OFF);
+ SetMaxRxPacketSize(iobase, 0x0fff); //set to max:4095
+ EnRXFIFOReadyInt(iobase, OFF);
+ EnRXFIFOHalfLevelInt(iobase, OFF);
+ EnTXFIFOHalfLevelInt(iobase, OFF);
+ EnTXFIFOUnderrunEOMInt(iobase, ON);
+// EnTXFIFOReadyInt(iobase,ON);
+ InvertTX(iobase, OFF);
+ InvertRX(iobase, OFF);
+// WriteLPCReg(0xF0,0); //(if VT1211 then do this)
+ if (IsSIROn(iobase)) {
+ SIRFilter(iobase, ON);
+ SIRRecvAny(iobase, ON);
+ } else {
+ SIRFilter(iobase, OFF);
+ SIRRecvAny(iobase, OFF);
+ }
+ EnRXSpecInt(iobase, ON);
+ WriteReg(iobase, I_ST_CT_0, 0x80);
+ EnableDMA(iobase, ON);
+}
+
+static void SetBaudRate(__u16 iobase, __u32 rate)
+{
+ __u8 value = 11, temp;
+
+ if (IsSIROn(iobase)) {
+ switch (rate) {
+ case (__u32) (2400L):
+ value = 47;
+ break;
+ case (__u32) (9600L):
+ value = 11;
+ break;
+ case (__u32) (19200L):
+ value = 5;
+ break;
+ case (__u32) (38400L):
+ value = 2;
+ break;
+ case (__u32) (57600L):
+ value = 1;
+ break;
+ case (__u32) (115200L):
+ value = 0;
+ break;
+ default:
+ break;
+ }
+ } else if (IsMIROn(iobase)) {
+ value = 0; // MIR rate is fixed at 1.152 Mb/s
+ } else if (IsFIROn(iobase)) {
+ value = 0; // FIR rate is fixed at 4 Mb/s
+ }
+ temp = (ReadReg(iobase, I_CF_H_1) & 0x03);
+ temp |= value << 2;
+ WriteReg(iobase, I_CF_H_1, temp);
+}
+
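+/* The 5-bit pulse width is split across two registers: bits 0-2 go into
+ * I_CF_L_1 bits 5-7, bits 3-4 into I_CF_H_1 bits 0-1.
+ */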
+static void SetPulseWidth(__u16 iobase, __u8 width)
+{
+ __u8 temp, temp1, temp2;
+
+ temp = (ReadReg(iobase, I_CF_L_1) & 0x1f);
+ temp1 = (ReadReg(iobase, I_CF_H_1) & 0xfc);
+ temp2 = (width & 0x07) << 5;
+ temp |= temp2;
+ temp2 = (width & 0x18) >> 3;
+ temp1 |= temp2;
+ WriteReg(iobase, I_CF_L_1, temp);
+ WriteReg(iobase, I_CF_H_1, temp1);
+}
+
+static void SetSendPreambleCount(__u16 iobase, __u8 count)
+{
+ __u8 temp;
+
+ temp = ReadReg(iobase, I_CF_L_1) & 0xe0;
+ temp |= count;
+ WriteReg(iobase, I_CF_L_1, temp);
+
+}
+
+static void SetVFIR(__u16 BaseAddr, __u8 val)
+{
+ __u8 tmp;
+
+ tmp = ReadReg(BaseAddr, I_CF_L_0);
+ WriteReg(BaseAddr, I_CF_L_0, tmp & 0x8f);
+ WriteRegBit(BaseAddr, I_CF_H_0, 5, val);
+}
+
+static void SetFIR(__u16 BaseAddr, __u8 val)
+{
+ __u8 tmp;
+
+ WriteRegBit(BaseAddr, I_CF_H_0, 5, 0);
+ tmp = ReadReg(BaseAddr, I_CF_L_0);
+ WriteReg(BaseAddr, I_CF_L_0, tmp & 0x8f);
+ WriteRegBit(BaseAddr, I_CF_L_0, 6, val);
+}
+
+static void SetMIR(__u16 BaseAddr, __u8 val)
+{
+ __u8 tmp;
+
+ WriteRegBit(BaseAddr, I_CF_H_0, 5, 0);
+ tmp = ReadReg(BaseAddr, I_CF_L_0);
+ WriteReg(BaseAddr, I_CF_L_0, tmp & 0x8f);
+ WriteRegBit(BaseAddr, I_CF_L_0, 5, val);
+}
+
+static void SetSIR(__u16 BaseAddr, __u8 val)
+{
+ __u8 tmp;
+
+ WriteRegBit(BaseAddr, I_CF_H_0, 5, 0);
+ tmp = ReadReg(BaseAddr, I_CF_L_0);
+ WriteReg(BaseAddr, I_CF_L_0, tmp & 0x8f);
+ WriteRegBit(BaseAddr, I_CF_L_0, 4, val);
+}
+
+#endif /* via_IRCC_H */
diff --git a/drivers/staging/irda/drivers/vlsi_ir.c b/drivers/staging/irda/drivers/vlsi_ir.c
new file mode 100644
index 000000000000..6638784c082e
--- /dev/null
+++ b/drivers/staging/irda/drivers/vlsi_ir.c
@@ -0,0 +1,1872 @@
+/*********************************************************************
+ *
+ * vlsi_ir.c: VLSI82C147 PCI IrDA controller driver for Linux
+ *
+ * Copyright (c) 2001-2003 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#include <linux/module.h>
+
+#define DRIVER_NAME "vlsi_ir"
+#define DRIVER_VERSION "v0.5"
+#define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
+#define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
+
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/********************************************************/
+
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/math64.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/crc.h>
+
+#include "vlsi_ir.h"
+
+/********************************************************/
+
+static /* const */ char drivername[] = DRIVER_NAME;
+
+static const struct pci_device_id vlsi_irda_table[] = {
+ {
+ .class = PCI_CLASS_WIRELESS_IRDA << 8,
+ .class_mask = PCI_CLASS_SUBCLASS_MASK << 8,
+ .vendor = PCI_VENDOR_ID_VLSI,
+ .device = PCI_DEVICE_ID_VLSI_82C147,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { /* all zeroes */ }
+};
+
+MODULE_DEVICE_TABLE(pci, vlsi_irda_table);
+
+/********************************************************/
+
+/* clksrc: which clock source to be used
+ * 0: auto - try PLL, fallback to 40MHz XCLK
+ * 1: on-chip 48MHz PLL
+ * 2: external 48MHz XCLK
+ * 3: external 40MHz XCLK (HP OB-800)
+ */
+
+static int clksrc = 0; /* default is 0(auto) */
+module_param(clksrc, int, 0);
+MODULE_PARM_DESC(clksrc, "clock input source selection");
+
+/* ringsize: size of the tx and rx descriptor rings
+ * independent for tx and rx
+ * specify as ringsize=tx[,rx]
+ * allowed values: 4, 8, 16, 32, 64
+ * Due to the IrDA 1.x max. allowed window size=7,
+ * there should be no gain when using rings larger than 8
+ */
+
+static int ringsize[] = {8,8}; /* default is tx=8 / rx=8 */
+module_param_array(ringsize, int, NULL, 0);
+MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size");
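+/* e.g. loading with "modprobe vlsi_ir ringsize=16,8" would request a
+ * 16-entry tx ring and an 8-entry rx ring (illustrative values only).
+ */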
+
+/* sirpulse: tuning of the SIR pulse width within IrPHY 1.3 limits
+ * 0: very short, 1.5us (exception: 6us at 2.4 kbaud)
+ * 1: nominal 3/16 bittime width
+ * note: IrDA compliant peer devices should be happy regardless
+ * which one is used. Primary goal is to save some power
+ * on the sender's side - at 9.6kbaud for example the short
+ * pulse width saves more than 90% of the transmitted IR power.
+ */
+
+static int sirpulse = 1; /* default is 3/16 bittime */
+module_param(sirpulse, int, 0);
+MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning");
+
+/* qos_mtt_bits: encoded min-turn-time value we require the peer device
+ * to use before transmitting to us. "Type 1" (per-station)
+ * bitfield according to IrLAP definition (section 6.6.8)
+ * Don't know which transceiver is used by my OB800 - the
+ * pretty common HP HDLS-1100 requires 1 msec - so let's use this.
+ */
+
+static int qos_mtt_bits = 0x07; /* default is 1 ms or more */
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time");
+
+/********************************************************/
+
+static void vlsi_reg_debug(unsigned iobase, const char *s)
+{
+ int i;
+
+ printk(KERN_DEBUG "%s: ", s);
+ for (i = 0; i < 0x20; i++)
+ printk("%02x", (unsigned)inb((iobase+i)));
+ printk("\n");
+}
+
+static void vlsi_ring_debug(struct vlsi_ring *r)
+{
+ struct ring_descr *rd;
+ unsigned i;
+
+ printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
+ __func__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
+ printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __func__,
+ atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask);
+ for (i = 0; i < r->size; i++) {
+ rd = &r->rd[i];
+ printk(KERN_DEBUG "%s - ring descr %u: ", __func__, i);
+ printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
+ printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n",
+ __func__, (unsigned) rd_get_status(rd),
+ (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
+ }
+}
+
+/********************************************************/
+
+/* needed regardless of CONFIG_PROC_FS */
+static struct proc_dir_entry *vlsi_proc_root = NULL;
+
+#ifdef CONFIG_PROC_FS
+
+static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev)
+{
+ unsigned iobase = pci_resource_start(pdev, 0);
+ unsigned i;
+
+ seq_printf(seq, "\n%s (vid/did: [%04x:%04x])\n",
+ pci_name(pdev), (int)pdev->vendor, (int)pdev->device);
+ seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state);
+ seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n",
+ pdev->irq, (unsigned)pci_resource_start(pdev, 0), (unsigned long long)pdev->dma_mask);
+ seq_printf(seq, "hw registers: ");
+ for (i = 0; i < 0x20; i++)
+ seq_printf(seq, "%02x", (unsigned)inb((iobase+i)));
+ seq_printf(seq, "\n");
+}
+
+static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ u8 byte;
+ u16 word;
+ s32 sec, usec;
+ unsigned iobase = ndev->base_addr;
+
+ seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name,
+ netif_device_present(ndev) ? "attached" : "detached",
+ netif_running(ndev) ? "running" : "not running",
+ netif_carrier_ok(ndev) ? "carrier ok" : "no carrier",
+ netif_queue_stopped(ndev) ? "queue stopped" : "queue running");
+
+ if (!netif_running(ndev))
+ return;
+
+ seq_printf(seq, "\nhw-state:\n");
+ pci_read_config_byte(idev->pdev, VLSI_PCI_IRMISC, &byte);
+ seq_printf(seq, "IRMISC:%s%s%s uart%s",
+ (byte&IRMISC_IRRAIL) ? " irrail" : "",
+ (byte&IRMISC_IRPD) ? " irpd" : "",
+ (byte&IRMISC_UARTTST) ? " uarttest" : "",
+ (byte&IRMISC_UARTEN) ? "@" : " disabled\n");
+ if (byte&IRMISC_UARTEN) {
+ seq_printf(seq, "0x%s\n",
+ (byte&2) ? ((byte&1) ? "3e8" : "2e8")
+ : ((byte&1) ? "3f8" : "2f8"));
+ }
+ pci_read_config_byte(idev->pdev, VLSI_PCI_CLKCTL, &byte);
+ seq_printf(seq, "CLKCTL: PLL %s%s%s / clock %s / wakeup %s\n",
+ (byte&CLKCTL_PD_INV) ? "powered" : "down",
+ (byte&CLKCTL_LOCK) ? " locked" : "",
+ (byte&CLKCTL_EXTCLK) ? ((byte&CLKCTL_XCKSEL)?" / 40 MHz XCLK":" / 48 MHz XCLK") : "",
+ (byte&CLKCTL_CLKSTP) ? "stopped" : "running",
+ (byte&CLKCTL_WAKE) ? "enabled" : "disabled");
+ pci_read_config_byte(idev->pdev, VLSI_PCI_MSTRPAGE, &byte);
+ seq_printf(seq, "MSTRPAGE: 0x%02x\n", (unsigned)byte);
+
+ byte = inb(iobase+VLSI_PIO_IRINTR);
+ seq_printf(seq, "IRINTR:%s%s%s%s%s%s%s%s\n",
+ (byte&IRINTR_ACTEN) ? " ACTEN" : "",
+ (byte&IRINTR_RPKTEN) ? " RPKTEN" : "",
+ (byte&IRINTR_TPKTEN) ? " TPKTEN" : "",
+ (byte&IRINTR_OE_EN) ? " OE_EN" : "",
+ (byte&IRINTR_ACTIVITY) ? " ACTIVITY" : "",
+ (byte&IRINTR_RPKTINT) ? " RPKTINT" : "",
+ (byte&IRINTR_TPKTINT) ? " TPKTINT" : "",
+ (byte&IRINTR_OE_INT) ? " OE_INT" : "");
+ word = inw(iobase+VLSI_PIO_RINGPTR);
+ seq_printf(seq, "RINGPTR: rx=%u / tx=%u\n", RINGPTR_GET_RX(word), RINGPTR_GET_TX(word));
+ word = inw(iobase+VLSI_PIO_RINGBASE);
+ seq_printf(seq, "RINGBASE: busmap=0x%08x\n",
+ ((unsigned)word << 10)|(MSTRPAGE_VALUE<<24));
+ word = inw(iobase+VLSI_PIO_RINGSIZE);
+ seq_printf(seq, "RINGSIZE: rx=%u / tx=%u\n", RINGSIZE_TO_RXSIZE(word),
+ RINGSIZE_TO_TXSIZE(word));
+
+ word = inw(iobase+VLSI_PIO_IRCFG);
+ seq_printf(seq, "IRCFG:%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ (word&IRCFG_LOOP) ? " LOOP" : "",
+ (word&IRCFG_ENTX) ? " ENTX" : "",
+ (word&IRCFG_ENRX) ? " ENRX" : "",
+ (word&IRCFG_MSTR) ? " MSTR" : "",
+ (word&IRCFG_RXANY) ? " RXANY" : "",
+ (word&IRCFG_CRC16) ? " CRC16" : "",
+ (word&IRCFG_FIR) ? " FIR" : "",
+ (word&IRCFG_MIR) ? " MIR" : "",
+ (word&IRCFG_SIR) ? " SIR" : "",
+ (word&IRCFG_SIRFILT) ? " SIRFILT" : "",
+ (word&IRCFG_SIRTEST) ? " SIRTEST" : "",
+ (word&IRCFG_TXPOL) ? " TXPOL" : "",
+ (word&IRCFG_RXPOL) ? " RXPOL" : "");
+ word = inw(iobase+VLSI_PIO_IRENABLE);
+ seq_printf(seq, "IRENABLE:%s%s%s%s%s%s%s%s\n",
+ (word&IRENABLE_PHYANDCLOCK) ? " PHYANDCLOCK" : "",
+ (word&IRENABLE_CFGER) ? " CFGERR" : "",
+ (word&IRENABLE_FIR_ON) ? " FIR_ON" : "",
+ (word&IRENABLE_MIR_ON) ? " MIR_ON" : "",
+ (word&IRENABLE_SIR_ON) ? " SIR_ON" : "",
+ (word&IRENABLE_ENTXST) ? " ENTXST" : "",
+ (word&IRENABLE_ENRXST) ? " ENRXST" : "",
+ (word&IRENABLE_CRC16_ON) ? " CRC16_ON" : "");
+ word = inw(iobase+VLSI_PIO_PHYCTL);
+ seq_printf(seq, "PHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
+ (unsigned)PHYCTL_TO_BAUD(word),
+ (unsigned)PHYCTL_TO_PLSWID(word),
+ (unsigned)PHYCTL_TO_PREAMB(word));
+ word = inw(iobase+VLSI_PIO_NPHYCTL);
+ seq_printf(seq, "NPHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
+ (unsigned)PHYCTL_TO_BAUD(word),
+ (unsigned)PHYCTL_TO_PLSWID(word),
+ (unsigned)PHYCTL_TO_PREAMB(word));
+ word = inw(iobase+VLSI_PIO_MAXPKT);
+ seq_printf(seq, "MAXPKT: max. rx packet size = %u\n", word);
+ word = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ seq_printf(seq, "RCVBCNT: rx-fifo filling level = %u\n", word);
+
+ seq_printf(seq, "\nsw-state:\n");
+ seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud,
+ (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"));
+ sec = div_s64_rem(ktime_us_delta(ktime_get(), idev->last_rx),
+ USEC_PER_SEC, &usec);
+	seq_printf(seq, "last rx: %d.%06u sec\n", sec, usec);
+
+ seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
+ ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors,
+ ndev->stats.rx_dropped);
+ seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n",
+ ndev->stats.rx_over_errors, ndev->stats.rx_length_errors,
+ ndev->stats.rx_frame_errors, ndev->stats.rx_crc_errors);
+ seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n",
+ ndev->stats.tx_packets, ndev->stats.tx_bytes, ndev->stats.tx_errors,
+ ndev->stats.tx_dropped, ndev->stats.tx_fifo_errors);
+
+}
+
+static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r)
+{
+ struct ring_descr *rd;
+ unsigned i, j;
+ int h, t;
+
+ seq_printf(seq, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
+ r->size, r->mask, r->len, r->dir, r->rd[0].hw);
+ h = atomic_read(&r->head) & r->mask;
+ t = atomic_read(&r->tail) & r->mask;
+ seq_printf(seq, "head = %d / tail = %d ", h, t);
+ if (h == t)
+ seq_printf(seq, "(empty)\n");
+ else {
+ if (((t+1)&r->mask) == h)
+ seq_printf(seq, "(full)\n");
+ else
+ seq_printf(seq, "(level = %d)\n", ((unsigned)(t-h) & r->mask));
+ rd = &r->rd[h];
+ j = (unsigned) rd_get_count(rd);
+ seq_printf(seq, "current: rd = %d / status = %02x / len = %u\n",
+ h, (unsigned)rd_get_status(rd), j);
+ if (j > 0) {
+ seq_printf(seq, " data: %*ph\n",
+ min_t(unsigned, j, 20), rd->buf);
+ }
+ }
+ for (i = 0; i < r->size; i++) {
+ rd = &r->rd[i];
+ seq_printf(seq, "> ring descr %u: ", i);
+ seq_printf(seq, "skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
+ seq_printf(seq, " hw: status=%02x count=%u busaddr=0x%08x\n",
+ (unsigned) rd_get_status(rd),
+ (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
+ }
+}
+
+static int vlsi_seq_show(struct seq_file *seq, void *v)
+{
+ struct net_device *ndev = seq->private;
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ unsigned long flags;
+
+ seq_printf(seq, "\n%s %s\n\n", DRIVER_NAME, DRIVER_VERSION);
+ seq_printf(seq, "clksrc: %s\n",
+ (clksrc>=2) ? ((clksrc==3)?"40MHz XCLK":"48MHz XCLK")
+ : ((clksrc==1)?"48MHz PLL":"autodetect"));
+ seq_printf(seq, "ringsize: tx=%d / rx=%d\n",
+ ringsize[0], ringsize[1]);
+ seq_printf(seq, "sirpulse: %s\n", (sirpulse)?"3/16 bittime":"short");
+ seq_printf(seq, "qos_mtt_bits: 0x%02x\n", (unsigned)qos_mtt_bits);
+
+ spin_lock_irqsave(&idev->lock, flags);
+ if (idev->pdev != NULL) {
+ vlsi_proc_pdev(seq, idev->pdev);
+
+ if (idev->pdev->current_state == 0)
+ vlsi_proc_ndev(seq, ndev);
+ else
+ seq_printf(seq, "\nPCI controller down - resume_ok = %d\n",
+ idev->resume_ok);
+ if (netif_running(ndev) && idev->rx_ring && idev->tx_ring) {
+ seq_printf(seq, "\n--------- RX ring -----------\n\n");
+ vlsi_proc_ring(seq, idev->rx_ring);
+ seq_printf(seq, "\n--------- TX ring -----------\n\n");
+ vlsi_proc_ring(seq, idev->tx_ring);
+ }
+ }
+ seq_printf(seq, "\n");
+ spin_unlock_irqrestore(&idev->lock, flags);
+
+ return 0;
+}
+
+static int vlsi_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vlsi_seq_show, PDE_DATA(inode));
+}
+
+static const struct file_operations vlsi_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = vlsi_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#define VLSI_PROC_FOPS (&vlsi_proc_fops)
+
+#else /* CONFIG_PROC_FS */
+#define VLSI_PROC_FOPS NULL
+#endif
+
+/********************************************************/
+
+static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr_hw *hwmap,
+ unsigned size, unsigned len, int dir)
+{
+ struct vlsi_ring *r;
+ struct ring_descr *rd;
+ unsigned i, j;
+ dma_addr_t busaddr;
+
+ if (!size || ((size-1)&size)!=0) /* must be >0 and power of 2 */
+ return NULL;
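+	/* e.g. size=16 passes (15 & 16 == 0) while size=24 is rejected
+	 * (23 & 24 == 16 != 0) - only the encodable power-of-2 sizes
+	 * reach the hardware ring-size register.
+	 */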
+
+ r = kmalloc(sizeof(*r) + size * sizeof(struct ring_descr), GFP_KERNEL);
+ if (!r)
+ return NULL;
+ memset(r, 0, sizeof(*r));
+
+ r->pdev = pdev;
+ r->dir = dir;
+ r->len = len;
+ r->rd = (struct ring_descr *)(r+1);
+ r->mask = size - 1;
+ r->size = size;
+ atomic_set(&r->head, 0);
+ atomic_set(&r->tail, 0);
+
+ for (i = 0; i < size; i++) {
+ rd = r->rd + i;
+ memset(rd, 0, sizeof(*rd));
+ rd->hw = hwmap + i;
+ rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
+ if (rd->buf)
+ busaddr = pci_map_single(pdev, rd->buf, len, dir);
+ if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) {
+ if (rd->buf) {
+ net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
+ __func__, rd->buf);
+ kfree(rd->buf);
+ rd->buf = NULL;
+ }
+ for (j = 0; j < i; j++) {
+ rd = r->rd + j;
+ busaddr = rd_get_addr(rd);
+ rd_set_addr_status(rd, 0, 0);
+ pci_unmap_single(pdev, busaddr, len, dir);
+ kfree(rd->buf);
+ rd->buf = NULL;
+ }
+ kfree(r);
+ return NULL;
+ }
+ rd_set_addr_status(rd, busaddr, 0);
+ /* initially, the dma buffer is owned by the CPU */
+ rd->skb = NULL;
+ }
+ return r;
+}
+
+static int vlsi_free_ring(struct vlsi_ring *r)
+{
+ struct ring_descr *rd;
+ unsigned i;
+ dma_addr_t busaddr;
+
+ for (i = 0; i < r->size; i++) {
+ rd = r->rd + i;
+ if (rd->skb)
+ dev_kfree_skb_any(rd->skb);
+ busaddr = rd_get_addr(rd);
+ rd_set_addr_status(rd, 0, 0);
+ if (busaddr)
+ pci_unmap_single(r->pdev, busaddr, r->len, r->dir);
+ kfree(rd->buf);
+ }
+ kfree(r);
+ return 0;
+}
+
+static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
+{
+ char *ringarea;
+ struct ring_descr_hw *hwmap;
+
+ idev->virtaddr = NULL;
+ idev->busaddr = 0;
+
+ ringarea = pci_zalloc_consistent(idev->pdev, HW_RING_AREA_SIZE,
+ &idev->busaddr);
+ if (!ringarea)
+ goto out;
+
+ hwmap = (struct ring_descr_hw *)ringarea;
+ idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1],
+ XFER_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (idev->rx_ring == NULL)
+ goto out_unmap;
+
+ hwmap += MAX_RING_DESCR;
+ idev->tx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[0],
+ XFER_BUF_SIZE, PCI_DMA_TODEVICE);
+ if (idev->tx_ring == NULL)
+ goto out_free_rx;
+
+ idev->virtaddr = ringarea;
+ return 0;
+
+out_free_rx:
+ vlsi_free_ring(idev->rx_ring);
+out_unmap:
+ idev->rx_ring = idev->tx_ring = NULL;
+ pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, ringarea, idev->busaddr);
+ idev->busaddr = 0;
+out:
+ return -ENOMEM;
+}
+
+static int vlsi_destroy_hwif(vlsi_irda_dev_t *idev)
+{
+ vlsi_free_ring(idev->rx_ring);
+ vlsi_free_ring(idev->tx_ring);
+ idev->rx_ring = idev->tx_ring = NULL;
+
+ if (idev->busaddr)
+ pci_free_consistent(idev->pdev,HW_RING_AREA_SIZE,idev->virtaddr,idev->busaddr);
+
+ idev->virtaddr = NULL;
+ idev->busaddr = 0;
+
+ return 0;
+}
+
+/********************************************************/
+
+static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
+{
+ u16 status;
+ int crclen, len = 0;
+ struct sk_buff *skb;
+ int ret = 0;
+ struct net_device *ndev = pci_get_drvdata(r->pdev);
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+
+ pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ /* dma buffer now owned by the CPU */
+ status = rd_get_status(rd);
+ if (status & RD_RX_ERROR) {
+ if (status & RD_RX_OVER)
+ ret |= VLSI_RX_OVER;
+ if (status & RD_RX_LENGTH)
+ ret |= VLSI_RX_LENGTH;
+ if (status & RD_RX_PHYERR)
+ ret |= VLSI_RX_FRAME;
+ if (status & RD_RX_CRCERR)
+ ret |= VLSI_RX_CRC;
+ goto done;
+ }
+
+ len = rd_get_count(rd);
+ crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
+ len -= crclen; /* remove trailing CRC */
+ if (len <= 0) {
+ pr_debug("%s: strange frame (len=%d)\n", __func__, len);
+ ret |= VLSI_RX_DROP;
+ goto done;
+ }
+
+ if (idev->mode == IFF_SIR) { /* hw checks CRC in MIR, FIR mode */
+
+		/* rd->buf is a streaming PCI_DMA_FROMDEVICE map. Doing the
+		 * endian adjustment right there in place will dirty a cache line
+		 * which belongs to the map and thus we must be sure it will
+		 * get flushed before giving the buffer back to the hardware.
+		 * vlsi_fill_rx() will do this anyway - but here we rely on it.
+		 */
+ le16_to_cpus(rd->buf+len);
+ if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) {
+ pr_debug("%s: crc error\n", __func__);
+ ret |= VLSI_RX_CRC;
+ goto done;
+ }
+ }
+
+ if (!rd->skb) {
+ net_warn_ratelimited("%s: rx packet lost\n", __func__);
+ ret |= VLSI_RX_DROP;
+ goto done;
+ }
+
+ skb = rd->skb;
+ rd->skb = NULL;
+ skb->dev = ndev;
+ skb_put_data(skb, rd->buf, len);
+ skb_reset_mac_header(skb);
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
+
+done:
+ rd_set_status(rd, 0);
+ rd_set_count(rd, 0);
+ /* buffer still owned by CPU */
+
+ return (ret) ? -ret : len;
+}
+
+static void vlsi_fill_rx(struct vlsi_ring *r)
+{
+ struct ring_descr *rd;
+
+ for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
+ if (rd_is_active(rd)) {
+ net_warn_ratelimited("%s: driver bug: rx descr race with hw\n",
+ __func__);
+ vlsi_ring_debug(r);
+ break;
+ }
+ if (!rd->skb) {
+ rd->skb = dev_alloc_skb(IRLAP_SKB_ALLOCSIZE);
+ if (rd->skb) {
+ skb_reserve(rd->skb,1);
+ rd->skb->protocol = htons(ETH_P_IRDA);
+ }
+ else
+ break; /* probably not worth logging? */
+ }
+ /* give dma buffer back to busmaster */
+ pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ rd_activate(rd);
+ }
+}
+
+static void vlsi_rx_interrupt(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ struct vlsi_ring *r = idev->rx_ring;
+ struct ring_descr *rd;
+ int ret;
+
+ for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
+
+ if (rd_is_active(rd))
+ break;
+
+ ret = vlsi_process_rx(r, rd);
+
+ if (ret < 0) {
+ ret = -ret;
+ ndev->stats.rx_errors++;
+ if (ret & VLSI_RX_DROP)
+ ndev->stats.rx_dropped++;
+ if (ret & VLSI_RX_OVER)
+ ndev->stats.rx_over_errors++;
+ if (ret & VLSI_RX_LENGTH)
+ ndev->stats.rx_length_errors++;
+ if (ret & VLSI_RX_FRAME)
+ ndev->stats.rx_frame_errors++;
+ if (ret & VLSI_RX_CRC)
+ ndev->stats.rx_crc_errors++;
+ }
+ else if (ret > 0) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += ret;
+ }
+ }
+
+ idev->last_rx = ktime_get(); /* remember "now" for later mtt delay */
+
+ vlsi_fill_rx(r);
+
+ if (ring_first(r) == NULL) {
+		/* we are in big trouble if this should ever happen */
+ net_err_ratelimited("%s: rx ring exhausted!\n", __func__);
+ vlsi_ring_debug(r);
+ }
+ else
+ outw(0, ndev->base_addr+VLSI_PIO_PROMPT);
+}
+
+/* caller must have stopped the controller from busmastering */
+
+static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
+{
+ struct net_device *ndev = pci_get_drvdata(idev->pdev);
+ struct vlsi_ring *r = idev->rx_ring;
+ struct ring_descr *rd;
+ int ret;
+
+ for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
+
+ ret = 0;
+ if (rd_is_active(rd)) {
+ rd_set_status(rd, 0);
+ if (rd_get_count(rd)) {
+ pr_debug("%s - dropping rx packet\n", __func__);
+ ret = -VLSI_RX_DROP;
+ }
+ rd_set_count(rd, 0);
+ pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ if (rd->skb) {
+ dev_kfree_skb_any(rd->skb);
+ rd->skb = NULL;
+ }
+ }
+ else
+ ret = vlsi_process_rx(r, rd);
+
+ if (ret < 0) {
+ ret = -ret;
+ ndev->stats.rx_errors++;
+ if (ret & VLSI_RX_DROP)
+ ndev->stats.rx_dropped++;
+ if (ret & VLSI_RX_OVER)
+ ndev->stats.rx_over_errors++;
+ if (ret & VLSI_RX_LENGTH)
+ ndev->stats.rx_length_errors++;
+ if (ret & VLSI_RX_FRAME)
+ ndev->stats.rx_frame_errors++;
+ if (ret & VLSI_RX_CRC)
+ ndev->stats.rx_crc_errors++;
+ }
+ else if (ret > 0) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += ret;
+ }
+ }
+}
+
+/********************************************************/
+
+static int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd)
+{
+ u16 status;
+ int len;
+ int ret;
+
+ pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ /* dma buffer now owned by the CPU */
+ status = rd_get_status(rd);
+ if (status & RD_TX_UNDRN)
+ ret = VLSI_TX_FIFO;
+ else
+ ret = 0;
+ rd_set_status(rd, 0);
+
+ if (rd->skb) {
+ len = rd->skb->len;
+ dev_kfree_skb_any(rd->skb);
+ rd->skb = NULL;
+ }
+ else /* tx-skb already freed? - should never happen */
+ len = rd_get_count(rd); /* incorrect for SIR! (due to wrapping) */
+
+ rd_set_count(rd, 0);
+ /* dma buffer still owned by the CPU */
+
+ return (ret) ? -ret : len;
+}
+
+static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
+{
+ u16 nphyctl;
+ u16 config;
+ unsigned mode;
+ int ret;
+ int baudrate;
+ int fifocnt;
+
+ baudrate = idev->new_baud;
+ pr_debug("%s: %d -> %d\n", __func__, idev->baud, idev->new_baud);
+ if (baudrate == 4000000) {
+ mode = IFF_FIR;
+ config = IRCFG_FIR;
+ nphyctl = PHYCTL_FIR;
+ }
+ else if (baudrate == 1152000) {
+ mode = IFF_MIR;
+ config = IRCFG_MIR | IRCFG_CRC16;
+ nphyctl = PHYCTL_MIR(clksrc==3);
+ }
+ else {
+ mode = IFF_SIR;
+ config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY;
+ switch(baudrate) {
+ default:
+ net_warn_ratelimited("%s: undefined baudrate %d - fallback to 9600!\n",
+ __func__, baudrate);
+ baudrate = 9600;
+ /* fallthru */
+ case 2400:
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+ nphyctl = PHYCTL_SIR(baudrate,sirpulse,clksrc==3);
+ break;
+ }
+ }
+ config |= IRCFG_MSTR | IRCFG_ENRX;
+
+ fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ if (fifocnt != 0) {
+ pr_debug("%s: rx fifo not empty(%d)\n", __func__, fifocnt);
+ }
+
+ outw(0, iobase+VLSI_PIO_IRENABLE);
+ outw(config, iobase+VLSI_PIO_IRCFG);
+ outw(nphyctl, iobase+VLSI_PIO_NPHYCTL);
+ wmb();
+ outw(IRENABLE_PHYANDCLOCK, iobase+VLSI_PIO_IRENABLE);
+ mb();
+
+ udelay(1); /* chip applies IRCFG on next rising edge of its 8MHz clock */
+
+ /* read back settings for validation */
+
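+	/* the XOR below strips the mode bit we expect to be reported back;
+	 * if the chip accepted the new configuration, only PHYANDCLOCK and
+	 * ENRXST should remain set, which is what the check further down
+	 * verifies.
+	 */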
+ config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK;
+
+ if (mode == IFF_FIR)
+ config ^= IRENABLE_FIR_ON;
+ else if (mode == IFF_MIR)
+ config ^= (IRENABLE_MIR_ON|IRENABLE_CRC16_ON);
+ else
+ config ^= IRENABLE_SIR_ON;
+
+ if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) {
+ net_warn_ratelimited("%s: failed to set %s mode!\n",
+ __func__,
+ mode == IFF_SIR ? "SIR" :
+ mode == IFF_MIR ? "MIR" : "FIR");
+ ret = -1;
+ }
+ else {
+ if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) {
+ net_warn_ratelimited("%s: failed to apply baudrate %d\n",
+ __func__, baudrate);
+ ret = -1;
+ }
+ else {
+ idev->mode = mode;
+ idev->baud = baudrate;
+ idev->new_baud = 0;
+ ret = 0;
+ }
+ }
+
+ if (ret)
+ vlsi_reg_debug(iobase,__func__);
+
+ return ret;
+}
+
+static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ struct vlsi_ring *r = idev->tx_ring;
+ struct ring_descr *rd;
+ unsigned long flags;
+ unsigned iobase = ndev->base_addr;
+ u8 status;
+ u16 config;
+ int mtt, diff;
+ int len, speed;
+ char *msg = NULL;
+
+ speed = irda_get_next_speed(skb);
+ spin_lock_irqsave(&idev->lock, flags);
+ if (speed != -1 && speed != idev->baud) {
+ netif_stop_queue(ndev);
+ idev->new_baud = speed;
+ status = RD_TX_CLRENTX; /* stop tx-ring after this frame */
+ }
+ else
+ status = 0;
+
+ if (skb->len == 0) {
+ /* handle zero packets - should be speed change */
+ if (status == 0) {
+ msg = "bogus zero-length packet";
+ goto drop_unlock;
+ }
+
+		/* due to the completely asynchronous tx operation we might have
+		 * IrLAP racing with the hardware here, e.g. if the controller
+		 * is just sending the last packet at the current speed while
+		 * the LAP is already switching the speed using a synchronous
+		 * len=0 packet. Immediate execution would lead to a hw lockup
+		 * requiring a powercycle to reset. A good candidate to trigger
+		 * this is the final UA:RSP packet after receiving a DISC:CMD
+		 * when taking the LAP down.
+		 * Note that we are not protected by the queue_stop approach
+		 * because the final UA:RSP arrives _without_ a request to apply
+		 * new-speed-after-this-packet - hence the driver doesn't know
+		 * this was the last packet and doesn't stop the queue. So the
+		 * forced switch to the default speed from the LAP gets through
+		 * perhaps only some 10 usec later, while the UA:RSP is still
+		 * being processed by the hardware, and we would get screwed.
+		 */
+
+ if (ring_first(idev->tx_ring) == NULL) {
+ /* no race - tx-ring already empty */
+ vlsi_set_baud(idev, iobase);
+ netif_wake_queue(ndev);
+ }
+		else {
+			/* keep the speed change pending like it would
+			 * for any len>0 packet. tx completion interrupt
+			 * will apply it when the tx ring becomes empty.
+			 */
+		}
+ spin_unlock_irqrestore(&idev->lock, flags);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* sanity checks - simply drop the packet */
+
+ rd = ring_last(r);
+ if (!rd) {
+ msg = "ring full, but queue wasn't stopped";
+ goto drop_unlock;
+ }
+
+ if (rd_is_active(rd)) {
+ msg = "entry still owned by hw";
+ goto drop_unlock;
+ }
+
+ if (!rd->buf) {
+ msg = "tx ring entry without pci buffer";
+ goto drop_unlock;
+ }
+
+ if (rd->skb) {
+ msg = "ring entry with old skb still attached";
+ goto drop_unlock;
+ }
+
+ /* no need for serialization or interrupt disable during mtt */
+ spin_unlock_irqrestore(&idev->lock, flags);
+
+ if ((mtt = irda_get_mtt(skb)) > 0) {
+ diff = ktime_us_delta(ktime_get(), idev->last_rx);
+ if (mtt > diff)
+ udelay(mtt - diff);
+ /* must not sleep here - called under netif_tx_lock! */
+ }
+
+ /* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu()
+ * after subsequent tx-completion
+ */
+
+ if (idev->mode == IFF_SIR) {
+ status |= RD_TX_DISCRC; /* no hw-crc creation */
+ len = async_wrap_skb(skb, rd->buf, r->len);
+
+ /* Some rare worst case situation in SIR mode might lead to
+ * potential buffer overflow. The wrapper detects this, returns
+ * with a shortened frame (without FCS/EOF) but doesn't provide
+ * any error indication about the invalid packet which we are
+ * going to transmit.
+		 * Therefore we log if the buffer got filled to the point where the
+		 * wrapper would abort, i.e. when there are fewer than 5 bytes left to
+		 * allow appending the FCS/EOF.
+ */
+
+ if (len >= r->len-5)
+ net_warn_ratelimited("%s: possible buffer overflow with SIR wrapping!\n",
+ __func__);
+ }
+ else {
+ /* hw deals with MIR/FIR mode wrapping */
+ status |= RD_TX_PULSE; /* send 2 us highspeed indication pulse */
+ len = skb->len;
+ if (len > r->len) {
+ msg = "frame exceeds tx buffer length";
+ goto drop;
+ }
+ else
+ skb_copy_from_linear_data(skb, rd->buf, len);
+ }
+
+ rd->skb = skb; /* remember skb for tx-complete stats */
+
+ rd_set_count(rd, len);
+ rd_set_status(rd, status); /* not yet active! */
+
+ /* give dma buffer back to busmaster-hw (flush caches to make
+ * CPU-driven changes visible from the pci bus).
+ */
+
+ pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);
+
+/* Switching to TX mode here races with the controller
+ * which may stop TX at any time when fetching an inactive descriptor
+ * or one with CLR_ENTX set. So we switch on TX only, if TX was not running
+ * _after_ the new descriptor was activated on the ring. This ensures
+ * we will either find TX already stopped or we can be sure, there
+ * will be a TX-complete interrupt even if the chip stopped doing
+ * TX just after we found it still running. The ISR will then find
+ * the non-empty ring and restart TX processing. The enclosing
+ * spinlock provides the correct serialization to prevent race with isr.
+ */
+
+ spin_lock_irqsave(&idev->lock,flags);
+
+ rd_activate(rd);
+
+ if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
+ int fifocnt;
+
+ fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ if (fifocnt != 0) {
+ pr_debug("%s: rx fifo not empty(%d)\n",
+ __func__, fifocnt);
+ }
+
+ config = inw(iobase+VLSI_PIO_IRCFG);
+ mb();
+ outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
+ wmb();
+ outw(0, iobase+VLSI_PIO_PROMPT);
+ }
+
+ if (ring_put(r) == NULL) {
+ netif_stop_queue(ndev);
+ pr_debug("%s: tx ring full - queue stopped\n", __func__);
+ }
+ spin_unlock_irqrestore(&idev->lock, flags);
+
+ return NETDEV_TX_OK;
+
+drop_unlock:
+ spin_unlock_irqrestore(&idev->lock, flags);
+drop:
+ net_warn_ratelimited("%s: dropping packet - %s\n", __func__, msg);
+ dev_kfree_skb_any(skb);
+ ndev->stats.tx_errors++;
+ ndev->stats.tx_dropped++;
+ /* Don't even think about returning NET_XMIT_DROP (=1) here!
+ * In fact any retval!=0 causes the packet scheduler to requeue the
+ * packet for later retry of transmission - which isn't exactly
+ * what we want after we've just called dev_kfree_skb_any ;-)
+ */
+ return NETDEV_TX_OK;
+}
+
+static void vlsi_tx_interrupt(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ struct vlsi_ring *r = idev->tx_ring;
+ struct ring_descr *rd;
+ unsigned iobase;
+ int ret;
+ u16 config;
+
+ for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
+
+ if (rd_is_active(rd))
+ break;
+
+ ret = vlsi_process_tx(r, rd);
+
+ if (ret < 0) {
+ ret = -ret;
+ ndev->stats.tx_errors++;
+ if (ret & VLSI_TX_DROP)
+ ndev->stats.tx_dropped++;
+ if (ret & VLSI_TX_FIFO)
+ ndev->stats.tx_fifo_errors++;
+ }
+ else if (ret > 0){
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += ret;
+ }
+ }
+
+ iobase = ndev->base_addr;
+
+ if (idev->new_baud && rd == NULL) /* tx ring empty and speed change pending */
+ vlsi_set_baud(idev, iobase);
+
+ config = inw(iobase+VLSI_PIO_IRCFG);
+ if (rd == NULL) /* tx ring empty: re-enable rx */
+ outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG);
+
+ else if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
+ int fifocnt;
+
+ fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ if (fifocnt != 0) {
+ pr_debug("%s: rx fifo not empty(%d)\n",
+ __func__, fifocnt);
+ }
+ outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
+ }
+
+ outw(0, iobase+VLSI_PIO_PROMPT);
+
+ if (netif_queue_stopped(ndev) && !idev->new_baud) {
+ netif_wake_queue(ndev);
+ pr_debug("%s: queue awoken\n", __func__);
+ }
+}
+
+/* caller must have stopped the controller from busmastering */
+
+static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
+{
+ struct net_device *ndev = pci_get_drvdata(idev->pdev);
+ struct vlsi_ring *r = idev->tx_ring;
+ struct ring_descr *rd;
+ int ret;
+
+ for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
+
+ ret = 0;
+ if (rd_is_active(rd)) {
+ rd_set_status(rd, 0);
+ rd_set_count(rd, 0);
+ pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
+ if (rd->skb) {
+ dev_kfree_skb_any(rd->skb);
+ rd->skb = NULL;
+ }
+ pr_debug("%s - dropping tx packet\n", __func__);
+ ret = -VLSI_TX_DROP;
+ }
+ else
+ ret = vlsi_process_tx(r, rd);
+
+ if (ret < 0) {
+ ret = -ret;
+ ndev->stats.tx_errors++;
+ if (ret & VLSI_TX_DROP)
+ ndev->stats.tx_dropped++;
+ if (ret & VLSI_TX_FIFO)
+ ndev->stats.tx_fifo_errors++;
+ }
+ else if (ret > 0){
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += ret;
+ }
+ }
+
+}
+
+/********************************************************/
+
+static int vlsi_start_clock(struct pci_dev *pdev)
+{
+ u8 clkctl, lock;
+ int i, count;
+
+ if (clksrc < 2) { /* auto or PLL: try PLL */
+ clkctl = CLKCTL_PD_INV | CLKCTL_CLKSTP;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+
+ /* procedure to detect PLL lock synchronisation:
+ * after 0.5 msec initial delay we expect to find 3 PLL lock
+ * indications within 10 msec for successful PLL detection.
+ */
+ udelay(500);
+ count = 0;
+ for (i = 500; i <= 10000; i += 50) { /* max 10 msec */
+ pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock);
+ if (lock&CLKCTL_LOCK) {
+ if (++count >= 3)
+ break;
+ }
+ udelay(50);
+ }
+ if (count < 3) {
+ if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
+ net_err_ratelimited("%s: no PLL or failed to lock!\n",
+ __func__);
+ clkctl = CLKCTL_CLKSTP;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+ return -1;
+ }
+ else /* was: clksrc=0(auto) */
+ clksrc = 3; /* fallback to 40MHz XCLK (OB800) */
+
+ pr_debug("%s: PLL not locked, fallback to clksrc=%d\n",
+ __func__, clksrc);
+ }
+ else
+ clksrc = 1; /* got successful PLL lock */
+ }
+
+ if (clksrc != 1) {
+		/* we get here if either no PLL was detected in auto-mode or
+		 * an external clock source was explicitly specified */
+
+ clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP;
+ if (clksrc == 3)
+ clkctl |= CLKCTL_XCKSEL;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+
+ /* no way to test for working XCLK */
+ }
+ else
+ pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
+
+ /* ok, now going to connect the chip with the clock source */
+
+ clkctl &= ~CLKCTL_CLKSTP;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+
+ return 0;
+}
+
+static void vlsi_stop_clock(struct pci_dev *pdev)
+{
+ u8 clkctl;
+
+ /* disconnect chip from clock source */
+ pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
+ clkctl |= CLKCTL_CLKSTP;
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+
+ /* disable all clock sources */
+ clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_PD_INV);
+ pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
+}
+
+/********************************************************/
+
+/* writing all-zero to the VLSI PCI IO register area seems to prevent
+ * some occasional situations where the hardware fails (the symptoms
+ * appear as stalled tx/rx state machines, i.e. everything looks ok for
+ * receive or transmit but the hw makes no progress or is unable to access
+ * the bus memory locations).
+ * Best place to call this is immediately after/before the internal clock
+ * gets started/stopped.
+ */
+
+static inline void vlsi_clear_regs(unsigned iobase)
+{
+ unsigned i;
+ const unsigned chip_io_extent = 32;
+
+ for (i = 0; i < chip_io_extent; i += sizeof(u16))
+ outw(0, iobase + i);
+}
+
+static int vlsi_init_chip(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ unsigned iobase;
+ u16 ptr;
+
+ /* start the clock and clean the registers */
+
+ if (vlsi_start_clock(pdev)) {
+ net_err_ratelimited("%s: no valid clock source\n", __func__);
+ return -1;
+ }
+ iobase = ndev->base_addr;
+ vlsi_clear_regs(iobase);
+
+ outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */
+
+ outw(0, iobase+VLSI_PIO_IRENABLE); /* disable IrPHY-interface */
+
+ /* disable everything, particularly IRCFG_MSTR - (also resetting the RING_PTR) */
+
+ outw(0, iobase+VLSI_PIO_IRCFG);
+ wmb();
+
+ outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT); /* max possible value=0x0fff */
+
+ outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE);
+
+ outw(TX_RX_TO_RINGSIZE(idev->tx_ring->size, idev->rx_ring->size),
+ iobase+VLSI_PIO_RINGSIZE);
+
+ ptr = inw(iobase+VLSI_PIO_RINGPTR);
+ atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr));
+ atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr));
+ atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr));
+ atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr));
+
+ vlsi_set_baud(idev, iobase); /* idev->new_baud used as provided by caller */
+
+ outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* just in case - w/c pending IRQ's */
+ wmb();
+
+ /* DO NOT BLINDLY ENABLE IRINTR_ACTEN!
+ * basically every received pulse fires an ACTIVITY-INT
+	 * leading to >>1000 INTs per second instead of a few tens
+ */
+
+ outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR);
+
+ return 0;
+}
+
+static int vlsi_start_hw(vlsi_irda_dev_t *idev)
+{
+ struct pci_dev *pdev = idev->pdev;
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ unsigned iobase = ndev->base_addr;
+ u8 byte;
+
+ /* we don't use the legacy UART, disable its address decoding */
+
+ pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte);
+ byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST);
+ pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte);
+
+ /* enable PCI busmaster access to our 16MB page */
+
+ pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE);
+ pci_set_master(pdev);
+
+ if (vlsi_init_chip(pdev) < 0) {
+ pci_disable_device(pdev);
+ return -1;
+ }
+
+ vlsi_fill_rx(idev->rx_ring);
+
+ idev->last_rx = ktime_get(); /* first mtt may start from now on */
+
+ outw(0, iobase+VLSI_PIO_PROMPT); /* kick hw state machine */
+
+ return 0;
+}
+
+static int vlsi_stop_hw(vlsi_irda_dev_t *idev)
+{
+ struct pci_dev *pdev = idev->pdev;
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ unsigned iobase = ndev->base_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&idev->lock,flags);
+ outw(0, iobase+VLSI_PIO_IRENABLE);
+ outw(0, iobase+VLSI_PIO_IRCFG); /* disable everything */
+
+ /* disable and w/c irqs */
+ outb(0, iobase+VLSI_PIO_IRINTR);
+ wmb();
+ outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);
+ spin_unlock_irqrestore(&idev->lock,flags);
+
+ vlsi_unarm_tx(idev);
+ vlsi_unarm_rx(idev);
+
+ vlsi_clear_regs(iobase);
+ vlsi_stop_clock(pdev);
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+/**************************************************************/
+
+static void vlsi_tx_timeout(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+
+ vlsi_reg_debug(ndev->base_addr, __func__);
+ vlsi_ring_debug(idev->tx_ring);
+
+ if (netif_running(ndev))
+ netif_stop_queue(ndev);
+
+ vlsi_stop_hw(idev);
+
+ /* now simply restart the whole thing */
+
+ if (!idev->new_baud)
+ idev->new_baud = idev->baud; /* keep current baudrate */
+
+ if (vlsi_start_hw(idev))
+ net_err_ratelimited("%s: failed to restart hw - %s(%s) unusable!\n",
+ __func__, pci_name(idev->pdev), ndev->name);
+ else
+ netif_start_queue(ndev);
+}
+
+static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ struct if_irda_req *irq = (struct if_irda_req *) rq;
+ unsigned long flags;
+ u16 fifocnt;
+ int ret = 0;
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH:
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ spin_lock_irqsave(&idev->lock, flags);
+ idev->new_baud = irq->ifr_baudrate;
+ /* when called from userland there might be a minor race window here
+ * if the stack tries to change speed concurrently - which would be
+ * pretty strange anyway with the userland having full control...
+ */
+ vlsi_set_baud(idev, ndev->base_addr);
+ spin_unlock_irqrestore(&idev->lock, flags);
+ break;
+ case SIOCSMEDIABUSY:
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ irda_device_set_media_busy(ndev, TRUE);
+ break;
+ case SIOCGRECEIVING:
+ /* the best we can do: check whether there are any bytes in rx fifo.
+ * The trustable window (in case some data arrives just afterwards)
+ * may be as short as 1usec or so at 4Mbps.
+ */
+ fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
+ irq->ifr_receiving = (fifocnt!=0) ? 1 : 0;
+ break;
+ default:
+ net_warn_ratelimited("%s: notsupp - cmd=%04x\n",
+ __func__, cmd);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+/********************************************************/
+
+static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *ndev = dev_instance;
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ unsigned iobase;
+ u8 irintr;
+ int boguscount = 5;
+ unsigned long flags;
+ int handled = 0;
+
+ iobase = ndev->base_addr;
+ spin_lock_irqsave(&idev->lock,flags);
+ do {
+ irintr = inb(iobase+VLSI_PIO_IRINTR);
+ mb();
+ outb(irintr, iobase+VLSI_PIO_IRINTR); /* acknowledge asap */
+
+ if (!(irintr&=IRINTR_INT_MASK)) /* not our INT - probably shared */
+ break;
+
+ handled = 1;
+
+ if (unlikely(!(irintr & ~IRINTR_ACTIVITY)))
+ break; /* nothing todo if only activity */
+
+ if (irintr&IRINTR_RPKTINT)
+ vlsi_rx_interrupt(ndev);
+
+ if (irintr&IRINTR_TPKTINT)
+ vlsi_tx_interrupt(ndev);
+
+ } while (--boguscount > 0);
+ spin_unlock_irqrestore(&idev->lock,flags);
+
+ if (boguscount <= 0)
+ net_info_ratelimited("%s: too much work in interrupt!\n",
+ __func__);
+ return IRQ_RETVAL(handled);
+}
+
+/********************************************************/
+
+static int vlsi_open(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ int err = -EAGAIN;
+ char hwname[32];
+
+ if (pci_request_regions(idev->pdev, drivername)) {
+ net_warn_ratelimited("%s: io resource busy\n", __func__);
+ goto errout;
+ }
+ ndev->base_addr = pci_resource_start(idev->pdev,0);
+ ndev->irq = idev->pdev->irq;
+
+	/* on some rare occasions the chip apparently comes up with
+	 * IRQs pending. We'd better w/c any pending IRQs and disable them all
+ */
+
+ outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR);
+
+ if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED,
+ drivername, ndev)) {
+ net_warn_ratelimited("%s: couldn't get IRQ: %d\n",
+ __func__, ndev->irq);
+ goto errout_io;
+ }
+
+ if ((err = vlsi_create_hwif(idev)) != 0)
+ goto errout_irq;
+
+ sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr);
+ idev->irlap = irlap_open(ndev,&idev->qos,hwname);
+ if (!idev->irlap)
+ goto errout_free_ring;
+
+ idev->last_rx = ktime_get(); /* first mtt may start from now on */
+
+ idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */
+
+ if ((err = vlsi_start_hw(idev)) != 0)
+ goto errout_close_irlap;
+
+ netif_start_queue(ndev);
+
+ net_info_ratelimited("%s: device %s operational\n",
+ __func__, ndev->name);
+
+ return 0;
+
+errout_close_irlap:
+ irlap_close(idev->irlap);
+errout_free_ring:
+ vlsi_destroy_hwif(idev);
+errout_irq:
+ free_irq(ndev->irq,ndev);
+errout_io:
+ pci_release_regions(idev->pdev);
+errout:
+ return err;
+}
+
+static int vlsi_close(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+
+ if (idev->irlap)
+ irlap_close(idev->irlap);
+ idev->irlap = NULL;
+
+ vlsi_stop_hw(idev);
+
+ vlsi_destroy_hwif(idev);
+
+ free_irq(ndev->irq,ndev);
+
+ pci_release_regions(idev->pdev);
+
+ net_info_ratelimited("%s: device %s stopped\n", __func__, ndev->name);
+
+ return 0;
+}
+
+static const struct net_device_ops vlsi_netdev_ops = {
+ .ndo_open = vlsi_open,
+ .ndo_stop = vlsi_close,
+ .ndo_start_xmit = vlsi_hard_start_xmit,
+ .ndo_do_ioctl = vlsi_ioctl,
+ .ndo_tx_timeout = vlsi_tx_timeout,
+};
+
+static int vlsi_irda_init(struct net_device *ndev)
+{
+ vlsi_irda_dev_t *idev = netdev_priv(ndev);
+ struct pci_dev *pdev = idev->pdev;
+
+ ndev->irq = pdev->irq;
+ ndev->base_addr = pci_resource_start(pdev,0);
+
+ /* PCI busmastering
+ * see include file for details why we need these 2 masks, in this order!
+ */
+
+ if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) ||
+ pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
+ net_err_ratelimited("%s: aborting due to PCI BM-DMA address limitations\n",
+ __func__);
+ return -1;
+ }
+
+ irda_init_max_qos_capabilies(&idev->qos);
+
+ /* the VLSI82C147 does not support 576000! */
+
+ idev->qos.baud_rate.bits = IR_2400 | IR_9600
+ | IR_19200 | IR_38400 | IR_57600 | IR_115200
+ | IR_1152000 | (IR_4000000 << 8);
+
+ idev->qos.min_turn_time.bits = qos_mtt_bits;
+
+ irda_qos_bits_to_value(&idev->qos);
+
+ /* currently no public media definitions for IrDA */
+
+ ndev->flags |= IFF_PORTSEL | IFF_AUTOMEDIA;
+ ndev->if_port = IF_PORT_UNKNOWN;
+
+ ndev->netdev_ops = &vlsi_netdev_ops;
+ ndev->watchdog_timeo = 500*HZ/1000; /* max. allowed turn time for IrLAP */
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ return 0;
+}
+
+/**************************************************************/
+
+static int
+vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct net_device *ndev;
+ vlsi_irda_dev_t *idev;
+
+ if (pci_enable_device(pdev))
+ goto out;
+ else
+ pdev->current_state = 0; /* hw must be running now */
+
+ net_info_ratelimited("%s: IrDA PCI controller %s detected\n",
+ drivername, pci_name(pdev));
+
+ if ( !pci_resource_start(pdev,0) ||
+ !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
+ net_err_ratelimited("%s: bar 0 invalid", __func__);
+ goto out_disable;
+ }
+
+ ndev = alloc_irdadev(sizeof(*idev));
+ if (ndev==NULL) {
+ net_err_ratelimited("%s: Unable to allocate device memory.\n",
+ __func__);
+ goto out_disable;
+ }
+
+ idev = netdev_priv(ndev);
+
+ spin_lock_init(&idev->lock);
+ mutex_init(&idev->mtx);
+ mutex_lock(&idev->mtx);
+ idev->pdev = pdev;
+
+ if (vlsi_irda_init(ndev) < 0)
+ goto out_freedev;
+
+ if (register_netdev(ndev) < 0) {
+ net_err_ratelimited("%s: register_netdev failed\n", __func__);
+ goto out_freedev;
+ }
+
+ if (vlsi_proc_root != NULL) {
+ struct proc_dir_entry *ent;
+
+ ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO,
+ vlsi_proc_root, VLSI_PROC_FOPS, ndev);
+ if (!ent) {
+ net_warn_ratelimited("%s: failed to create proc entry\n",
+ __func__);
+ } else {
+ proc_set_size(ent, 0);
+ }
+ idev->proc_entry = ent;
+ }
+ net_info_ratelimited("%s: registered device %s\n",
+ drivername, ndev->name);
+
+ pci_set_drvdata(pdev, ndev);
+ mutex_unlock(&idev->mtx);
+
+ return 0;
+
+out_freedev:
+ mutex_unlock(&idev->mtx);
+ free_netdev(ndev);
+out_disable:
+ pci_disable_device(pdev);
+out:
+ return -ENODEV;
+}
+
+static void vlsi_irda_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ vlsi_irda_dev_t *idev;
+
+ if (!ndev) {
+ net_err_ratelimited("%s: lost netdevice?\n", drivername);
+ return;
+ }
+
+ unregister_netdev(ndev);
+
+ idev = netdev_priv(ndev);
+ mutex_lock(&idev->mtx);
+ if (idev->proc_entry) {
+ remove_proc_entry(ndev->name, vlsi_proc_root);
+ idev->proc_entry = NULL;
+ }
+ mutex_unlock(&idev->mtx);
+
+ free_netdev(ndev);
+
+ net_info_ratelimited("%s: %s removed\n", drivername, pci_name(pdev));
+}
+
+#ifdef CONFIG_PM
+
+/* The controller doesn't provide PCI PM capabilities as defined by the PCI specs.
+ * Some of the Linux PCI-PM code however depends on them, for example in
+ * pci_set_power_state(). So we have to take care to perform the required
+ * operations on our own (particularly reflecting the pdev->current_state),
+ * otherwise we might get cheated by pci-pm.
+ */
+
+static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ vlsi_irda_dev_t *idev;
+
+ if (!ndev) {
+ net_err_ratelimited("%s - %s: no netdevice\n",
+ __func__, pci_name(pdev));
+ return 0;
+ }
+ idev = netdev_priv(ndev);
+ mutex_lock(&idev->mtx);
+ if (pdev->current_state != 0) { /* already suspended */
+ if (state.event > pdev->current_state) { /* simply go deeper */
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ pdev->current_state = state.event;
+ }
+ else
+ net_err_ratelimited("%s - %s: invalid suspend request %u -> %u\n",
+ __func__, pci_name(pdev),
+ pdev->current_state, state.event);
+ mutex_unlock(&idev->mtx);
+ return 0;
+ }
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ vlsi_stop_hw(idev);
+ pci_save_state(pdev);
+ if (!idev->new_baud)
+ /* remember speed settings to restore on resume */
+ idev->new_baud = idev->baud;
+ }
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ pdev->current_state = state.event;
+ idev->resume_ok = 1;
+ mutex_unlock(&idev->mtx);
+ return 0;
+}
+
+static int vlsi_irda_resume(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ vlsi_irda_dev_t *idev;
+
+ if (!ndev) {
+ net_err_ratelimited("%s - %s: no netdevice\n",
+ __func__, pci_name(pdev));
+ return 0;
+ }
+ idev = netdev_priv(ndev);
+ mutex_lock(&idev->mtx);
+ if (pdev->current_state == 0) {
+ mutex_unlock(&idev->mtx);
+ net_warn_ratelimited("%s - %s: already resumed\n",
+ __func__, pci_name(pdev));
+ return 0;
+ }
+
+ pci_set_power_state(pdev, PCI_D0);
+ pdev->current_state = PM_EVENT_ON;
+
+ if (!idev->resume_ok) {
+ /* should be obsolete now - but used to happen due to:
+ * - pci layer initially setting pdev->current_state = 4 (unknown)
+ * - pci layer did not walk the save_state-tree (might be APM problem)
+ * so we could not refuse to suspend from undefined state
+ * - vlsi_irda_suspend detected invalid state and refused to save
+ * configuration for resume - but was too late to stop suspending
+ * - vlsi_irda_resume got screwed when trying to resume from garbage
+ *
+ * now we explicitly set pdev->current_state = 0 after enabling the
+ * device and independently resume_ok should catch any garbage config.
+ */
+ net_warn_ratelimited("%s - hm, nothing to resume?\n", __func__);
+ mutex_unlock(&idev->mtx);
+ return 0;
+ }
+
+ if (netif_running(ndev)) {
+ pci_restore_state(pdev);
+ vlsi_start_hw(idev);
+ netif_device_attach(ndev);
+ }
+ idev->resume_ok = 0;
+ mutex_unlock(&idev->mtx);
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+/*********************************************************/
+
+static struct pci_driver vlsi_irda_driver = {
+ .name = drivername,
+ .id_table = vlsi_irda_table,
+ .probe = vlsi_irda_probe,
+ .remove = vlsi_irda_remove,
+#ifdef CONFIG_PM
+ .suspend = vlsi_irda_suspend,
+ .resume = vlsi_irda_resume,
+#endif
+};
+
+#define PROC_DIR ("driver/" DRIVER_NAME)
+
+static int __init vlsi_mod_init(void)
+{
+ int i, ret;
+
+ if (clksrc < 0 || clksrc > 3) {
+ net_err_ratelimited("%s: invalid clksrc=%d\n",
+ drivername, clksrc);
+ return -1;
+ }
+
+ for (i = 0; i < 2; i++) {
+ switch(ringsize[i]) {
+ case 4:
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ break;
+ default:
+ net_warn_ratelimited("%s: invalid %s ringsize %d, using default=8\n",
+ drivername,
+ i ? "rx" : "tx",
+ ringsize[i]);
+ ringsize[i] = 8;
+ break;
+ }
+ }
+
+ sirpulse = !!sirpulse;
+
+ /* proc_mkdir returns NULL if !CONFIG_PROC_FS.
+ * Failure to create the procfs entry is handled like running
+ * without procfs - it's not required for the driver to work.
+ */
+ vlsi_proc_root = proc_mkdir(PROC_DIR, NULL);
+
+ ret = pci_register_driver(&vlsi_irda_driver);
+
+ if (ret && vlsi_proc_root)
+ remove_proc_entry(PROC_DIR, NULL);
+ return ret;
+
+}
+
+static void __exit vlsi_mod_exit(void)
+{
+ pci_unregister_driver(&vlsi_irda_driver);
+ if (vlsi_proc_root)
+ remove_proc_entry(PROC_DIR, NULL);
+}
+
+module_init(vlsi_mod_init);
+module_exit(vlsi_mod_exit);
diff --git a/drivers/staging/irda/drivers/vlsi_ir.h b/drivers/staging/irda/drivers/vlsi_ir.h
new file mode 100644
index 000000000000..f9db2ce4c5c6
--- /dev/null
+++ b/drivers/staging/irda/drivers/vlsi_ir.h
@@ -0,0 +1,757 @@
+
+/*********************************************************************
+ *
+ * vlsi_ir.h: VLSI82C147 PCI IrDA controller driver for Linux
+ *
+ * Version: 0.5
+ *
+ * Copyright (c) 2001-2003 Martin Diehl
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ ********************************************************************/
+
+#ifndef IRDA_VLSI_FIR_H
+#define IRDA_VLSI_FIR_H
+
+/* ================================================================
+ * compatibility stuff
+ */
+
+/* definitions not present in pci_ids.h */
+
+#ifndef PCI_CLASS_WIRELESS_IRDA
+#define PCI_CLASS_WIRELESS_IRDA 0x0d00
+#endif
+
+#ifndef PCI_CLASS_SUBCLASS_MASK
+#define PCI_CLASS_SUBCLASS_MASK 0xffff
+#endif
+
+/* ================================================================ */
+
+/* non-standard PCI registers */
+
+enum vlsi_pci_regs {
+ VLSI_PCI_CLKCTL = 0x40, /* chip clock input control */
+ VLSI_PCI_MSTRPAGE = 0x41, /* addr [31:24] for all busmaster cycles */
+ VLSI_PCI_IRMISC = 0x42 /* mainly legacy UART related */
+};
+
+/* ------------------------------------------ */
+
+/* VLSI_PCI_CLKCTL: Clock Control Register (u8, rw) */
+
+/* Three possible clock sources: either on-chip 48MHz PLL or
+ * external clock applied to EXTCLK pin. External clock may
+ * be either 48MHz or 40MHz, which is indicated by XCKSEL.
+ * CLKSTP controls whether the selected clock source gets
+ * connected to the IrDA block.
+ *
+ * On my HP OB-800 the BIOS selects the external 40MHz clock as source
+ * when IrDA is enabled, and I've never detected any PLL lock success.
+ * Apparently the 14.3...MHz OSC input required for the PLL to work
+ * is not connected and the 40MHz EXTCLK is provided externally.
+ * At least this is what makes the driver work for me.
+ */
+
+enum vlsi_pci_clkctl {
+
+ /* PLL control */
+
+ CLKCTL_PD_INV = 0x04, /* PD#: inverted power down signal,
+ * i.e. PLL is powered, if PD_INV set */
+ CLKCTL_LOCK = 0x40, /* (ro) set, if PLL is locked */
+
+ /* clock source selection */
+
+ CLKCTL_EXTCLK = 0x20, /* set to select external clock input, not PLL */
+ CLKCTL_XCKSEL = 0x10, /* set to indicate EXTCLK is 40MHz, not 48MHz */
+
+ /* IrDA block control */
+
+ CLKCTL_CLKSTP = 0x80, /* set to disconnect from selected clock source */
+ CLKCTL_WAKE = 0x08 /* set to enable wakeup feature: whenever IR activity
+ * is detected, PD_INV gets set(?) and CLKSTP cleared */
+};
+
+/* ------------------------------------------ */
+
+/* VLSI_PCI_MSTRPAGE: Master Page Register (u8, rw) and busmastering stuff */
+
+#define DMA_MASK_USED_BY_HW 0xffffffff
+#define DMA_MASK_MSTRPAGE 0x00ffffff
+#define MSTRPAGE_VALUE (DMA_MASK_MSTRPAGE >> 24)
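+
+/* note: with the values above MSTRPAGE_VALUE evaluates to 0x00, i.e. the
+ * [31:24] address byte driven by the chip is forced to zero and all
+ * busmaster traffic must target the lowest 16MB of bus address space.
+ */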
+
+ /* PCI busmastering is somewhat special for this guy - in short:
+ *
+ * We select to operate using fixed MSTRPAGE=0, use ISA DMA
+ * address restrictions to make the PCI BM api aware of this,
+ * but ensure the hardware is dealing with real 32bit access.
+ *
+ * In detail:
+ * The chip executes normal 32bit busmaster cycles, i.e.
+ * drives all 32 address lines. These addresses however are
+ * composed of [0:23] taken from various busaddr-pointers
+ * and [24:31] taken from the MSTRPAGE register in the VLSI82C147
+ * config space. Therefore _all_ busmastering must be
+ * targeted to/from one single 16MB (busaddr-) superpage!
+ * The point is to make sure all the allocations for memory
+ * locations with busmaster access (ring descriptors, buffers)
+ * are indeed bus-mappable to the same 16MB range (for x86 this
+ * means they must reside in the same 16MB physical memory address
+ * range). The only constraint we have which supports the "several objects
+ * mappable to a common 16MB range" paradigm is the old ISA DMA
+ * restriction to the first 16MB of the physical address range.
+ * Hence the approach here is to enable PCI busmaster support using
+ * the correct 32bit dma-mask used by the chip. Afterwards the device's
+ * dma-mask gets restricted to 24bit, which must be honoured somehow by
+ * all allocations for memory areas to be exposed to the chip ...
+ *
+ * Note:
+ * Don't be surprised to get "Setting latency timer..." messages every
+ * time when PCI busmastering is enabled for the chip.
+ * The chip has its PCI latency timer RO fixed at 0 - which is not a
+ * problem here, because it is never requesting _burst_ transactions.
+ */
+
+/* ------------------------------------------ */
+
+/* VLSI_PCIIRMISC: IR Miscellaneous Register (u8, rw) */
+
+/* legacy UART emulation - not used by this driver - would require:
+ * (see below for some register-value definitions)
+ *
+ * - IRMISC_UARTEN must be set to enable UART address decoding
+ * - IRMISC_UARTSEL configured
+ * - IRCFG_MASTER must be cleared
+ * - IRCFG_SIR must be set
+ * - IRENABLE_PHYANDCLOCK must be asserted 0->1 (and hence IRENABLE_SIR_ON)
+ */
+
+enum vlsi_pci_irmisc {
+
+ /* IR transceiver control */
+
+ IRMISC_IRRAIL = 0x40, /* (ro?) IR rail power indication (and control?)
+ * 0=3.3V / 1=5V. Probably set during power-on?
+ * unclear - not touched by driver */
+ IRMISC_IRPD = 0x08, /* transceiver power down, if set */
+
+ /* legacy UART control */
+
+ IRMISC_UARTTST = 0x80, /* UART test mode - "always write 0" */
+ IRMISC_UARTEN = 0x04, /* enable UART address decoding */
+
+ /* bits [1:0] IRMISC_UARTSEL to select legacy UART address */
+
+ IRMISC_UARTSEL_3f8 = 0x00,
+ IRMISC_UARTSEL_2f8 = 0x01,
+ IRMISC_UARTSEL_3e8 = 0x02,
+ IRMISC_UARTSEL_2e8 = 0x03
+};
+
+/* ================================================================ */
+
+/* registers mapped to 32 byte PCI IO space */
+
+/* note: it is better to access all registers at the indicated u8/u16 size
+ *	 although some of them contain only 1 byte of information.
+ *	 Some of them (particularly PROMPT and IRCFG) ignore
+ *	 access when using the wrong addressing mode!
+ */
+
+enum vlsi_pio_regs {
+ VLSI_PIO_IRINTR = 0x00, /* interrupt enable/request (u8, rw) */
+ VLSI_PIO_RINGPTR = 0x02, /* rx/tx ring pointer (u16, ro) */
+ VLSI_PIO_RINGBASE = 0x04, /* [23:10] of ring address (u16, rw) */
+ VLSI_PIO_RINGSIZE = 0x06, /* rx/tx ring size (u16, rw) */
+ VLSI_PIO_PROMPT = 0x08, /* triggers ring processing (u16, wo) */
+ /* 0x0a-0x0f: reserved / duplicated UART regs */
+ VLSI_PIO_IRCFG = 0x10, /* configuration select (u16, rw) */
+ VLSI_PIO_SIRFLAG = 0x12, /* BOF/EOF for filtered SIR (u16, ro) */
+ VLSI_PIO_IRENABLE = 0x14, /* enable and status register (u16, rw/ro) */
+ VLSI_PIO_PHYCTL = 0x16, /* physical layer current status (u16, ro) */
+ VLSI_PIO_NPHYCTL = 0x18, /* next physical layer select (u16, rw) */
+ VLSI_PIO_MAXPKT = 0x1a, /* [11:0] max len for packet receive (u16, rw) */
+ VLSI_PIO_RCVBCNT = 0x1c /* current receive-FIFO byte count (u16, ro) */
+ /* 0x1e-0x1f: reserved / duplicated UART regs */
+};
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_IRINTR: Interrupt Register (u8, rw) */
+
+/* enable-bits:
+ * 1 = enable / 0 = disable
+ * interrupt condition bits:
+ * set according to corresponding interrupt source
+ * (regardless of the state of the enable bits)
+ * enable bit status indicates whether interrupt gets raised
+ * write-to-clear
+ * note: RPKTINT and TPKTINT behave differently in legacy UART mode (which we don't use :-)
+ */
+
+enum vlsi_pio_irintr {
+ IRINTR_ACTEN = 0x80, /* activity interrupt enable */
+ IRINTR_ACTIVITY = 0x40, /* activity monitor (traffic detected) */
+ IRINTR_RPKTEN = 0x20, /* receive packet interrupt enable*/
+ IRINTR_RPKTINT = 0x10, /* rx-packet transferred from fifo to memory finished */
+ IRINTR_TPKTEN = 0x08, /* transmit packet interrupt enable */
+ IRINTR_TPKTINT = 0x04, /* last bit of tx-packet+crc shifted to ir-pulser */
+ IRINTR_OE_EN = 0x02, /* UART rx fifo overrun error interrupt enable */
+ IRINTR_OE_INT = 0x01 /* UART rx fifo overrun error (read LSR to clear) */
+};
+
+/* we use this mask to check whether the (shared PCI) interrupt is ours */
+
+#define IRINTR_INT_MASK (IRINTR_ACTIVITY|IRINTR_RPKTINT|IRINTR_TPKTINT)
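+/* i.e. IRINTR_INT_MASK == 0x54 - only the ACTIVITY, RPKTINT and TPKTINT
+ * condition bits, not the corresponding enable bits.
+ */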
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_RINGPTR: Ring Pointer Read-Back Register (u16, ro) */
+
+/* _both_ ring pointers are indices relative to the _entire_ rx,tx-ring!
+ * i.e. the referenced descriptor is located
+ * at RINGBASE + PTR * sizeof(descr) for rx and tx
+ * therefore, the tx-pointer has offset MAX_RING_DESCR
+ */
+
+#define MAX_RING_DESCR 64 /* tx, rx rings may contain up to 64 descr each */
+
+#define RINGPTR_RX_MASK (MAX_RING_DESCR-1)
+#define RINGPTR_TX_MASK ((MAX_RING_DESCR-1)<<8)
+
+#define RINGPTR_GET_RX(p) ((p)&RINGPTR_RX_MASK)
+#define RINGPTR_GET_TX(p) (((p)&RINGPTR_TX_MASK)>>8)
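+/* worked example: a read-back value of 0x1a05 decodes to rx index 5
+ * (0x1a05 & RINGPTR_RX_MASK) and tx index 26 ((0x1a05 & RINGPTR_TX_MASK) >> 8).
+ */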
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_RINGBASE: Ring Pointer Base Address Register (u16, ro) */
+
+/* Contains the [23:10] part of the ring base (bus-) address,
+ * which must be 1k-aligned. [31:24] is taken from
+ * VLSI_PCI_MSTRPAGE above.
+ * The controller initiates non-burst PCI BM cycles to
+ * fetch and update the descriptors in the ring.
+ * Once fetched, the descriptor remains cached onchip
+ * until it gets closed and updated due to the ring
+ * processing state machine.
+ * The entire ring area is split into rx and tx areas, with each
+ * area consisting of 64 descriptors of 8 bytes each.
+ * The rx(tx) ring is located at ringbase+0 (ringbase+64*8).
+ */
+
+#define BUS_TO_RINGBASE(p) (((p)>>10)&0x3fff)
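+/* worked example: a 1k-aligned ring area at bus address 0x0012ac00 is
+ * programmed as BUS_TO_RINGBASE(0x0012ac00) == 0x04ab, and the chip
+ * reconstructs the full address as (0x04ab << 10) | (MSTRPAGE_VALUE << 24).
+ */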
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_RINGSIZE: Ring Size Register (u16, rw) */
+
+/* bit mask to indicate the ring size to be used for rx and tx.
+ * possible values encoded bits
+ * 4 0000
+ * 8 0001
+ * 16 0011
+ * 32 0111
+ * 64 1111
+ * located at [15:12] for tx and [11:8] for rx ([7:0] unused)
+ *
+ * note: probably a good idea to have IRCFG_MSTR cleared when writing
+ * this so the state machines are stopped and the RINGPTR is reset!
+ */
+
+#define SIZE_TO_BITS(num) ((((num)-1)>>2)&0x0f)
+#define TX_RX_TO_RINGSIZE(tx,rx) ((SIZE_TO_BITS(tx)<<12)|(SIZE_TO_BITS(rx)<<8))
+#define RINGSIZE_TO_RXSIZE(rs) ((((rs)&0x0f00)>>6)+4)
+#define RINGSIZE_TO_TXSIZE(rs) ((((rs)&0xf000)>>10)+4)
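+/* worked example for the driver defaults (tx=8, rx=8):
+ * SIZE_TO_BITS(8) == 0x1, so TX_RX_TO_RINGSIZE(8, 8) == 0x1100, and the
+ * read-back conversions round-trip: RINGSIZE_TO_RXSIZE(0x1100) == 8,
+ * RINGSIZE_TO_TXSIZE(0x1100) == 8.
+ */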
+
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_PROMPT: Ring Prompting Register (u16, write-to-start) */
+
+/* writing any value kicks the ring processing state machines
+ * for both tx, rx rings as follows:
+ * - active rings (currently owning an active descriptor)
+ * ignore the prompt and continue
+ * - idle rings fetch the next descr from the ring and start
+ * their processing
+ */
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_IRCFG: IR Config Register (u16, rw) */
+
+/* notes:
+ *	- at most one of the SIR/MIR/FIR bits may be set at any time
+ * - SIR, MIR, FIR and CRC16 select the configuration which will
+ * be applied on next 0->1 transition of IRENABLE_PHYANDCLOCK (see below).
+ * - besides allowing the PCI interface to execute busmaster cycles
+ * and therefore the ring SM to operate, the MSTR bit has side-effects:
+ * when MSTR is cleared, the RINGPTR's get reset and the legacy UART mode
+ * (in contrast to busmaster access mode) gets enabled.
+ * - clearing ENRX or setting ENTX while data is being received may stall the
+ *   receive fifo until ENRX is re-enabled _and_ another packet arrives
+ * - SIRFILT means the chip performs the required unwrapping of hardware
+ * headers (XBOF's, BOF/EOF) and un-escaping in the _receive_ direction.
+ * Only the resulting IrLAP payload is copied to the receive buffers -
+ *   but with the 16bit FCS still included. The question remains whether it
+ *   was already checked or whether we should do it before passing the packet to IrLAP.
+ */
+
+enum vlsi_pio_ircfg {
+ IRCFG_LOOP = 0x4000, /* enable loopback test mode */
+ IRCFG_ENTX = 0x1000, /* transmit enable */
+ IRCFG_ENRX = 0x0800, /* receive enable */
+ IRCFG_MSTR = 0x0400, /* master enable */
+ IRCFG_RXANY = 0x0200, /* receive any packet */
+ IRCFG_CRC16 = 0x0080, /* 16bit (not 32bit) CRC select for MIR/FIR */
+ IRCFG_FIR = 0x0040, /* FIR 4PPM encoding mode enable */
+ IRCFG_MIR = 0x0020, /* MIR HDLC encoding mode enable */
+ IRCFG_SIR = 0x0010, /* SIR encoding mode enable */
+ IRCFG_SIRFILT = 0x0008, /* enable SIR decode filter (receiver unwrapping) */
+ IRCFG_SIRTEST = 0x0004, /* allow SIR decode filter when not in SIR mode */
+ IRCFG_TXPOL = 0x0002, /* invert tx polarity when set */
+ IRCFG_RXPOL = 0x0001 /* invert rx polarity when set */
+};
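+
+/* Minimal usage sketch (illustration only, not part of the driver): a typical
+ * SIR receive configuration combines master enable, receive enable, SIR
+ * encoding and the receive-side unwrapping filter; it takes effect on the
+ * next 0->1 transition of IRENABLE_PHYANDCLOCK (see below).
+ */
+static inline u16 example_ircfg_sir_rx(void)
+{
+	return IRCFG_MSTR | IRCFG_ENRX | IRCFG_SIR | IRCFG_SIRFILT;
+}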
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_SIRFLAG: SIR Flag Register (u16, ro) */
+
+/* register contains hardcoded BOF=0xc0 at [7:0] and EOF=0xc1 at [15:8]
+ * which is used for unwrapping received frames in SIR decode-filter mode
+ */
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_IRENABLE: IR Enable Register (u16, rw/ro) */
+
+/* notes:
+ * - IREN acts as gate for latching the configured IR mode information
+ * from IRCFG and IRPHYCTL when IREN=reset and applying them when
+ * IREN gets set afterwards.
+ * - ENTXST reflects IRCFG_ENTX
+ * - ENRXST = IRCFG_ENRX && (!IRCFG_ENTX || IRCFG_LOOP)
+ */
+
+enum vlsi_pio_irenable {
+ IRENABLE_PHYANDCLOCK = 0x8000, /* enable IR phy and gate the mode config (rw) */
+ IRENABLE_CFGER = 0x4000, /* mode configuration error (ro) */
+ IRENABLE_FIR_ON = 0x2000, /* FIR on status (ro) */
+ IRENABLE_MIR_ON = 0x1000, /* MIR on status (ro) */
+ IRENABLE_SIR_ON = 0x0800, /* SIR on status (ro) */
+ IRENABLE_ENTXST = 0x0400, /* transmit enable status (ro) */
+ IRENABLE_ENRXST = 0x0200, /* Receive enable status (ro) */
+ IRENABLE_CRC16_ON = 0x0100 /* 16bit (not 32bit) CRC enabled status (ro) */
+};
+
+#define IRENABLE_MASK 0xff00 /* Read mask */
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_PHYCTL: IR Physical Layer Current Control Register (u16, ro) */
+
+/* read-back of the currently applied physical layer status.
+ * applied from VLSI_PIO_NPHYCTL at rising edge of IRENABLE_PHYANDCLOCK
+ * contents identical to VLSI_PIO_NPHYCTL (see below)
+ */
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_NPHYCTL: IR Physical Layer Next Control Register (u16, rw) */
+
+/* latched during IRENABLE_PHYANDCLOCK=0 and applied at 0-1 transition
+ *
+ * consists of BAUD[15:10], PLSWID[9:5] and PREAMB[4:0] bits defined as follows:
+ *
+ * SIR-mode: BAUD = (115.2kHz / baudrate) - 1
+ * PLSWID = (pulsetime * freq / (BAUD+1)) - 1
+ * where pulsetime is the requested IrPHY pulse width
+ * and freq is 8(16)MHz for 40(48)MHz primary input clock
+ * PREAMB: don't care for SIR
+ *
+ * The nominal SIR pulse width is 3/16 bit time so we have PLSWID=12
+ * fixed for all SIR speeds at 40MHz input clock (PLSWID=24 at 48MHz).
+ * IrPHY also allows shorter pulses down to the nominal pulse duration
+ * at 115.2kbaud (minus some tolerance) which is 1.41 usec.
+ * Using the expression PLSWID = 12/(BAUD+1)-1 (multiplied by two for 48MHz)
+ * we get the minimum acceptable PLSWID values according to the VLSI
+ * specification, which provides 1.5 usec pulse width for all speeds (except
+ * for 2.4kbaud getting 6usec). This is fine with IrPHY v1.3 specs and
+ * reduces the transceiver power which drains the battery. At 9.6kbaud for
+ * example this amounts to more than 90% battery power saving!
+ *
+ * MIR-mode: BAUD = 0
+ * PLSWID = 9(10) for 40(48) MHz input clock
+ * to get nominal MIR pulse width
+ * PREAMB = 1
+ *
+ * FIR-mode: BAUD = 0
+ * PLSWID: don't care
+ * PREAMB = 15
+ */
+
+#define PHYCTL_BAUD_SHIFT 10
+#define PHYCTL_BAUD_MASK 0xfc00
+#define PHYCTL_PLSWID_SHIFT 5
+#define PHYCTL_PLSWID_MASK 0x03e0
+#define PHYCTL_PREAMB_SHIFT 0
+#define PHYCTL_PREAMB_MASK 0x001f
+
+#define PHYCTL_TO_BAUD(bwp) (((bwp)&PHYCTL_BAUD_MASK)>>PHYCTL_BAUD_SHIFT)
+#define PHYCTL_TO_PLSWID(bwp) (((bwp)&PHYCTL_PLSWID_MASK)>>PHYCTL_PLSWID_SHIFT)
+#define PHYCTL_TO_PREAMB(bwp) (((bwp)&PHYCTL_PREAMB_MASK)>>PHYCTL_PREAMB_SHIFT)
+
+#define BWP_TO_PHYCTL(b,w,p) ((((b)<<PHYCTL_BAUD_SHIFT)&PHYCTL_BAUD_MASK) \
+ | (((w)<<PHYCTL_PLSWID_SHIFT)&PHYCTL_PLSWID_MASK) \
+ | (((p)<<PHYCTL_PREAMB_SHIFT)&PHYCTL_PREAMB_MASK))
+
+#define BAUD_BITS(br) ((115200/(br))-1)
+
+static inline unsigned
+calc_width_bits(unsigned baudrate, unsigned widthselect, unsigned clockselect)
+{
+ unsigned tmp;
+
+	if (widthselect)		/* nominal 3/16 pulse width */
+ return (clockselect) ? 12 : 24;
+
+ tmp = ((clockselect) ? 12 : 24) / (BAUD_BITS(baudrate)+1);
+
+ /* intermediate result of integer division needed here */
+
+ return (tmp>0) ? (tmp-1) : 0;
+}
+
+#define PHYCTL_SIR(br,ws,cs) BWP_TO_PHYCTL(BAUD_BITS(br),calc_width_bits((br),(ws),(cs)),0)
+#define PHYCTL_MIR(cs) BWP_TO_PHYCTL(0,((cs)?9:10),1)
+#define PHYCTL_FIR BWP_TO_PHYCTL(0,0,15)
+
+/* quite ugly, I know. But implementing these calculations here avoids
+ * having magic numbers in the code and allows some playing with pulsewidths
+ * without risk to violate the standards.
+ * FWIW, here is the table for reference:
+ *
+ * baudrate BAUD min-PLSWID nom-PLSWID PREAMB
+ * 2400 47 0(0) 12(24) 0
+ * 9600 11 0(0) 12(24) 0
+ * 19200 5 1(2) 12(24) 0
+ * 38400 2 3(6) 12(24) 0
+ * 57600 1 5(10) 12(24) 0
+ * 115200 0 11(22) 12(24) 0
+ * MIR 0 - 9(10) 1
+ * FIR 0 - 0 15
+ *
+ * note: x(y) means x-value for 40MHz / y-value for 48MHz primary input clock
+ */
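+
+/* Minimal usage sketch (illustration only, not part of the driver): at
+ * 115.2 kbaud with a 40MHz primary clock (clockselect != 0) and the minimum
+ * pulse width selected (widthselect == 0), the macros above yield BAUD=0 and
+ * PLSWID=11, i.e. a PHYCTL value of 0x0160 - matching the 115200 row of the
+ * table above.
+ */
+static inline u16 example_phyctl_sir_115200(void)
+{
+	return PHYCTL_SIR(115200, 0, 1);	/* == 0x0160 */
+}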
+
+/* ------------------------------------------ */
+
+
+/* VLSI_PIO_MAXPKT: Maximum Packet Length register (u16, rw) */
+
+/* maximum acceptable length for received packets */
+
+/* hw imposed limitation - register uses only [11:0] */
+#define MAX_PACKET_LENGTH 0x0fff
+
+/* IrLAP I-field (apparently not defined elsewhere) */
+#define IRDA_MTU 2048
+
+/* complete packet consists of A(1)+C(1)+I(<=IRDA_MTU) */
+#define IRLAP_SKB_ALLOCSIZE (1+1+IRDA_MTU)
+
+/* the buffers we use to exchange frames with the hardware need to be
+ * larger than IRLAP_SKB_ALLOCSIZE because we may have up to 4 bytes FCS
+ * appended and, in SIR mode, a lot of frame wrapping bytes. The worst
+ * case appears to be a SIR packet with I-size==IRDA_MTU and all bytes
+ * requiring to be escaped to provide transparency. Furthermore, the peer
+ * might ask for quite a number of additional XBOFs:
+ * up to 115+48 XBOFS 163
+ * regular BOF 1
+ * A-field 1
+ * C-field 1
+ * I-field, IRDA_MTU, all escaped 4096
+ * FCS (16 bit at SIR, escaped) 4
+ * EOF 1
+ * AFAICS nothing in IrLAP guarantees the A/C fields not to need escaping
+ * (e.g. 0xc0/0xc1 - i.e. BOF/EOF - are legal values there) so in the
+ * worst case we have 4269 bytes total frame size.
+ * However, the VLSI uses 12 bits only for all buffer length values,
+ * which limits the maximum usable buffer size to 4095.
+ * Note this is not a limitation in the receive case because we use
+ * the SIR filtering mode where the hw unwraps the frame and only the
+ * bare packet+fcs is stored into the buffer - in contrast to the SIR
+ * tx case where we have to pass frame-wrapped packets to the hw.
+ * If this would ever become an issue in real life, the only workaround
+ * I see would be using the legacy UART emulation in SIR mode.
+ */
+
+#define XFER_BUF_SIZE MAX_PACKET_LENGTH
+
+/* ------------------------------------------ */
+
+/* VLSI_PIO_RCVBCNT: Receive Byte Count Register (u16, ro) */
+
+/* receive packet counter gets incremented on every non-filtered
+ * byte which was put in the receive fifo and reset for each
+ * new packet. Used to decide whether we are just in the middle
+ * of receiving
+ */
+
+/* better apply the [11:0] mask when reading, as some docs say the
+ * reserved [15:12] would return 1 when reading - which is wrong AFAICS
+ */
+#define RCVBCNT_MASK 0x0fff
+
+/******************************************************************/
+
+/* descriptors for rx/tx ring
+ *
+ * accessed by hardware - don't change!
+ *
+ * the descriptor is owned by hardware, when the ACTIVE status bit
+ * is set and nothing (besides reading status to test the bit)
+ * shall be done. The bit gets cleared by hw, when the descriptor
+ * gets closed. Premature reaping of descriptors owned by the chip
+ * can be achieved by disabling IRCFG_MSTR
+ *
+ * Attention: Writing addr overwrites status!
+ *
+ * ### FIXME: depends on endianness (but there ain't no non-i586 ob800 ;-)
+ */
+
+struct ring_descr_hw {
+ volatile __le16 rd_count; /* tx/rx count [11:0] */
+ __le16 reserved;
+ union {
+ __le32 addr; /* [23:0] of the buffer's busaddress */
+ struct {
+ u8 addr_res[3];
+ volatile u8 status; /* descriptor status */
+ } __packed rd_s;
+ } __packed rd_u;
+} __packed;
+
+#define rd_addr rd_u.addr
+#define rd_status rd_u.rd_s.status
+
+/* ring descriptor status bits */
+
+#define RD_ACTIVE 0x80 /* descriptor owned by hw (both TX,RX) */
+
+/* TX ring descriptor status */
+
+#define RD_TX_DISCRC 0x40 /* do not send CRC (for SIR) */
+#define RD_TX_BADCRC 0x20 /* force a bad CRC */
+#define RD_TX_PULSE 0x10 /* send indication pulse after this frame (MIR/FIR) */
+#define RD_TX_FRCEUND 0x08 /* force underrun */
+#define RD_TX_CLRENTX 0x04 /* clear ENTX after this frame */
+#define RD_TX_UNDRN 0x01 /* TX fifo underrun (probably PCI problem) */
+
+/* RX ring descriptor status */
+
+#define RD_RX_PHYERR 0x40 /* physical encoding error */
+#define RD_RX_CRCERR 0x20 /* CRC error (MIR/FIR) */
+#define RD_RX_LENGTH 0x10 /* frame exceeds buffer length */
+#define RD_RX_OVER 0x08 /* RX fifo overrun (probably PCI problem) */
+#define RD_RX_SIRBAD 0x04 /* EOF missing: BOF follows BOF (SIR, filtered) */
+
+#define RD_RX_ERROR 0x7c /* any error in received frame */
+
+/* the memory required to hold the 2 descriptor rings */
+#define HW_RING_AREA_SIZE (2 * MAX_RING_DESCR * sizeof(struct ring_descr_hw))
+
+/******************************************************************/
+
+/* each sw-ring descriptor consists of a bus-mapped transfer buffer with
+ * an associated skb and a pointer to the hw entry descriptor
+ */
+
+struct ring_descr {
+ struct ring_descr_hw *hw;
+ struct sk_buff *skb;
+ void *buf;
+};
+
+/* wrappers for operations on hw-exposed ring descriptors
+ * access to the hw-part of the descriptors must use these.
+ */
+
+static inline int rd_is_active(struct ring_descr *rd)
+{
+ return (rd->hw->rd_status & RD_ACTIVE) != 0;
+}
+
+static inline void rd_activate(struct ring_descr *rd)
+{
+ rd->hw->rd_status |= RD_ACTIVE;
+}
+
+static inline void rd_set_status(struct ring_descr *rd, u8 s)
+{
+ rd->hw->rd_status = s; /* may pass ownership to the hardware */
+}
+
+static inline void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s)
+{
+ /* order is important for two reasons:
+ * - overlayed: writing addr overwrites status
+ * - we want to write status last so we have valid address in
+ * case status has RD_ACTIVE set
+ */
+
+ if ((a & ~DMA_MASK_MSTRPAGE)>>24 != MSTRPAGE_VALUE) {
+ net_err_ratelimited("%s: pci busaddr inconsistency!\n",
+ __func__);
+ dump_stack();
+ return;
+ }
+
+ a &= DMA_MASK_MSTRPAGE; /* clear highbyte to make sure we won't write
+ * to status - just in case MSTRPAGE_VALUE!=0
+ */
+ rd->hw->rd_addr = cpu_to_le32(a);
+ wmb();
+ rd_set_status(rd, s); /* may pass ownership to the hardware */
+}
+
+static inline void rd_set_count(struct ring_descr *rd, u16 c)
+{
+ rd->hw->rd_count = cpu_to_le16(c);
+}
+
+static inline u8 rd_get_status(struct ring_descr *rd)
+{
+ return rd->hw->rd_status;
+}
+
+static inline dma_addr_t rd_get_addr(struct ring_descr *rd)
+{
+ dma_addr_t a;
+
+ a = le32_to_cpu(rd->hw->rd_addr);
+ return (a & DMA_MASK_MSTRPAGE) | (MSTRPAGE_VALUE << 24);
+}
+
+static inline u16 rd_get_count(struct ring_descr *rd)
+{
+ return le16_to_cpu(rd->hw->rd_count);
+}
+
+/******************************************************************/
+
+/* sw descriptor rings for rx, tx:
+ *
+ * operations follow producer-consumer paradigm, with the hw
+ * in the middle doing the processing.
+ * ring size must be power of two.
+ *
+ * producer advances r->tail after inserting for processing
+ * consumer advances r->head after removing processed rd
+ * ring is empty if head==tail / full if (tail+1)==head
+ */
+
+struct vlsi_ring {
+ struct pci_dev *pdev;
+ int dir;
+ unsigned len;
+ unsigned size;
+ unsigned mask;
+ atomic_t head, tail;
+ struct ring_descr *rd;
+};
+
+/* ring processing helpers */
+
+static inline struct ring_descr *ring_last(struct vlsi_ring *r)
+{
+ int t;
+
+ t = atomic_read(&r->tail) & r->mask;
+ return (((t+1) & r->mask) == (atomic_read(&r->head) & r->mask)) ? NULL : &r->rd[t];
+}
+
+static inline struct ring_descr *ring_put(struct vlsi_ring *r)
+{
+ atomic_inc(&r->tail);
+ return ring_last(r);
+}
+
+static inline struct ring_descr *ring_first(struct vlsi_ring *r)
+{
+ int h;
+
+ h = atomic_read(&r->head) & r->mask;
+ return (h == (atomic_read(&r->tail) & r->mask)) ? NULL : &r->rd[h];
+}
+
+static inline struct ring_descr *ring_get(struct vlsi_ring *r)
+{
+ atomic_inc(&r->head);
+ return ring_first(r);
+}
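+
+/* Minimal usage sketch (illustration only, not part of the driver): typical
+ * producer-side use of the helpers above - claim the next free descriptor,
+ * hand it to the hardware, then advance the tail.
+ */
+static inline int example_ring_submit(struct vlsi_ring *r, dma_addr_t buf, u16 len)
+{
+	struct ring_descr *rd = ring_last(r);	/* NULL if the ring is full */
+
+	if (!rd || rd_is_active(rd))
+		return -EBUSY;			/* full or still owned by hw */
+
+	rd_set_count(rd, len);
+	rd_set_addr_status(rd, buf, RD_ACTIVE);	/* passes ownership to hw */
+	ring_put(r);				/* advance producer (tail) */
+	return 0;
+}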
+
+/******************************************************************/
+
+/* our private compound VLSI-PCI-IRDA device information */
+
+typedef struct vlsi_irda_dev {
+ struct pci_dev *pdev;
+
+ struct irlap_cb *irlap;
+
+ struct qos_info qos;
+
+ unsigned mode;
+ int baud, new_baud;
+
+ dma_addr_t busaddr;
+ void *virtaddr;
+ struct vlsi_ring *tx_ring, *rx_ring;
+
+ ktime_t last_rx;
+
+ spinlock_t lock;
+ struct mutex mtx;
+
+ u8 resume_ok;
+ struct proc_dir_entry *proc_entry;
+
+} vlsi_irda_dev_t;
+
+/********************************************************/
+
+/* the remapped error flags we use for returning from frame
+ * post-processing in vlsi_process_tx/rx() after it was completed
+ * by the hardware. These functions either return the >=0 number
+ * of transferred bytes in case of success or the negative (-)
+ * of the or'ed error flags.
+ */
+
+#define VLSI_TX_DROP 0x0001
+#define VLSI_TX_FIFO 0x0002
+
+#define VLSI_RX_DROP 0x0100
+#define VLSI_RX_OVER 0x0200
+#define VLSI_RX_LENGTH 0x0400
+#define VLSI_RX_FRAME 0x0800
+#define VLSI_RX_CRC 0x1000
+
+/********************************************************/
+
+#endif /* IRDA_VLSI_FIR_H */
+
diff --git a/drivers/staging/irda/drivers/w83977af.h b/drivers/staging/irda/drivers/w83977af.h
new file mode 100644
index 000000000000..04476c2e9121
--- /dev/null
+++ b/drivers/staging/irda/drivers/w83977af.h
@@ -0,0 +1,53 @@
+#ifndef W83977AF_H
+#define W83977AF_H
+
+#define W977_EFIO_BASE 0x370
+#define W977_EFIO2_BASE 0x3f0
+#define W977_DEVICE_IR 0x06
+
+
+/*
+ * Enter extended function mode
+ */
+static inline void w977_efm_enter(unsigned int efio)
+{
+ outb(0x87, efio);
+ outb(0x87, efio);
+}
+
+/*
+ * Select a device to configure
+ */
+
+static inline void w977_select_device(__u8 devnum, unsigned int efio)
+{
+ outb(0x07, efio);
+ outb(devnum, efio+1);
+}
+
+/*
+ * Write a byte to a register
+ */
+static inline void w977_write_reg(__u8 reg, __u8 value, unsigned int efio)
+{
+ outb(reg, efio);
+ outb(value, efio+1);
+}
+
+/*
+ * read a byte from a register
+ */
+static inline __u8 w977_read_reg(__u8 reg, unsigned int efio)
+{
+ outb(reg, efio);
+ return inb(efio+1);
+}
+
+/*
+ * Exit extended function mode
+ */
+static inline void w977_efm_exit(unsigned int efio)
+{
+ outb(0xAA, efio);
+}
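+
+/*
+ * Minimal usage sketch (illustration only, not part of the driver): the usual
+ * sequence for programming a logical device of the SuperIO chip - enter
+ * extended function mode, select the device, write its registers, then leave
+ * again.  Register 0x30 is the (de)activation register used by the
+ * probe/close code in w83977af_ir.c.
+ */
+static inline void w977_example_activate_ir(unsigned int efio)
+{
+	w977_efm_enter(efio);
+	w977_select_device(W977_DEVICE_IR, efio);
+	w977_write_reg(0x30, 0x01, efio);	/* 0x01 = activate */
+	w977_efm_exit(efio);
+}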
+#endif
diff --git a/drivers/staging/irda/drivers/w83977af_ir.c b/drivers/staging/irda/drivers/w83977af_ir.c
new file mode 100644
index 000000000000..282b6c9ae05b
--- /dev/null
+++ b/drivers/staging/irda/drivers/w83977af_ir.c
@@ -0,0 +1,1285 @@
+/*********************************************************************
+ *
+ * Filename: w83977af_ir.c
+ * Version: 1.0
+ * Description: FIR driver for the Winbond W83977AF Super I/O chip
+ * Status: Experimental.
+ * Author: Paul VanderSpek
+ * Created at: Wed Nov 4 11:46:16 1998
+ * Modified at: Fri Jan 28 12:10:59 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
+ * Copyright (c) 1998-1999 Rebel.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Paul VanderSpek nor Rebel.com admit liability nor provide
+ * warranty for any of this software. This material is provided "AS-IS"
+ * and at no charge.
+ *
+ * If you find bugs in this file, it's very likely that the same bug
+ * will also be in pc87108.c since the implementations are quite
+ * similar.
+ *
+ * Notice that all functions that need to access the chip in _any_
+ * way must save the BSR register on entry and restore it on exit.
+ * It is _very_ important to follow this policy!
+ *
+ * __u8 bank;
+ *
+ * bank = inb( iobase+BSR);
+ *
+ * do_your_stuff_here();
+ *
+ * outb( bank, iobase+BSR);
+ *
+ ********************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/rtnetlink.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+#include "w83977af.h"
+#include "w83977af_ir.h"
+
+#define CONFIG_USE_W977_PNP /* Currently needed */
+#define PIO_MAX_SPEED 115200
+
+static char *driver_name = "w83977af_ir";
+static int qos_mtt_bits = 0x07; /* 1 ms or more */
+
+#define CHIP_IO_EXTENT 8
+
+static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
+#ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
+static unsigned int irq[] = { 6, 0, 0, 0 };
+#else
+static unsigned int irq[] = { 11, 0, 0, 0 };
+#endif
+static unsigned int dma[] = { 1, 0, 0, 0 };
+static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
+static unsigned int efio = W977_EFIO_BASE;
+
+static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};
+
+/* Some prototypes */
+static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
+ unsigned int dma);
+static int w83977af_close(struct w83977af_ir *self);
+static int w83977af_probe(int iobase, int irq, int dma);
+static int w83977af_dma_receive(struct w83977af_ir *self);
+static int w83977af_dma_receive_complete(struct w83977af_ir *self);
+static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
+static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
+static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
+static int w83977af_is_receiving(struct w83977af_ir *self);
+
+static int w83977af_net_open(struct net_device *dev);
+static int w83977af_net_close(struct net_device *dev);
+static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+
+/*
+ * Function w83977af_init ()
+ *
+ * Initialize chip. Just try to find out how many chips we are dealing with
+ * and where they are
+ */
+static int __init w83977af_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
+ if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
+ return 0;
+ }
+ return -ENODEV;
+}
+
+/*
+ * Function w83977af_cleanup ()
+ *
+ * Close all configured chips
+ *
+ */
+static void __exit w83977af_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
+ if (dev_self[i])
+ w83977af_close(dev_self[i]);
+ }
+}
+
+static const struct net_device_ops w83977_netdev_ops = {
+ .ndo_open = w83977af_net_open,
+ .ndo_stop = w83977af_net_close,
+ .ndo_start_xmit = w83977af_hard_xmit,
+ .ndo_do_ioctl = w83977af_net_ioctl,
+};
+
+/*
+ * Function w83977af_open (i, iobase, irq, dma)
+ *
+ * Open driver instance
+ *
+ */
+static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
+ unsigned int dma)
+{
+ struct net_device *dev;
+ struct w83977af_ir *self;
+ int err;
+
+ /* Lock the port that we need */
+ if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
+ pr_debug("%s: can't get iobase of 0x%03x\n",
+ __func__, iobase);
+ return -ENODEV;
+ }
+
+ if (w83977af_probe(iobase, irq, dma) == -1) {
+ err = -1;
+ goto err_out;
+ }
+ /*
+ * Allocate new instance of the driver
+ */
+ dev = alloc_irdadev(sizeof(struct w83977af_ir));
+ if (!dev) {
+ pr_err("IrDA: Can't allocate memory for IrDA control block!\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ self = netdev_priv(dev);
+ spin_lock_init(&self->lock);
+
+ /* Initialize IO */
+ self->io.fir_base = iobase;
+ self->io.irq = irq;
+ self->io.fir_ext = CHIP_IO_EXTENT;
+ self->io.dma = dma;
+ self->io.fifo_size = 32;
+
+ /* Initialize QoS for this device */
+ irda_init_max_qos_capabilies(&self->qos);
+
+	/* The only value we must override is the baud rate */
+
+ /* FIXME: The HP HDLS-1100 does not support 1152000! */
+ self->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 | IR_57600 |
+ IR_115200 | IR_576000 | IR_1152000 | (IR_4000000 << 8);
+
+ /* The HP HDLS-1100 needs 1 ms according to the specs */
+ self->qos.min_turn_time.bits = qos_mtt_bits;
+ irda_qos_bits_to_value(&self->qos);
+
+ /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
+ self->rx_buff.truesize = 14384;
+ self->tx_buff.truesize = 4000;
+
+ /* Allocate memory if needed */
+ self->rx_buff.head =
+ dma_zalloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
+ if (!self->rx_buff.head) {
+ err = -ENOMEM;
+ goto err_out1;
+ }
+
+ self->tx_buff.head =
+ dma_zalloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
+ if (!self->tx_buff.head) {
+ err = -ENOMEM;
+ goto err_out2;
+ }
+
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->tx_buff.data = self->tx_buff.head;
+ self->rx_buff.data = self->rx_buff.head;
+ self->netdev = dev;
+
+ dev->netdev_ops = &w83977_netdev_ops;
+
+ err = register_netdev(dev);
+ if (err) {
+		net_err_ratelimited("%s: register_netdev() failed!\n",
+ __func__);
+ goto err_out3;
+ }
+ net_info_ratelimited("IrDA: Registered device %s\n", dev->name);
+
+ /* Need to store self somewhere */
+ dev_self[i] = self;
+
+ return 0;
+err_out3:
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+err_out2:
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+err_out1:
+ free_netdev(dev);
+err_out:
+ release_region(iobase, CHIP_IO_EXTENT);
+ return err;
+}
+
+/*
+ * Function w83977af_close (self)
+ *
+ * Close driver instance
+ *
+ */
+static int w83977af_close(struct w83977af_ir *self)
+{
+ int iobase;
+
+ iobase = self->io.fir_base;
+
+#ifdef CONFIG_USE_W977_PNP
+ /* enter PnP configuration mode */
+ w977_efm_enter(efio);
+
+ w977_select_device(W977_DEVICE_IR, efio);
+
+ /* Deactivate device */
+ w977_write_reg(0x30, 0x00, efio);
+
+ w977_efm_exit(efio);
+#endif /* CONFIG_USE_W977_PNP */
+
+ /* Remove netdevice */
+ unregister_netdev(self->netdev);
+
+ /* Release the PORT that this driver is using */
+ pr_debug("%s: Releasing Region %03x\n", __func__, self->io.fir_base);
+ release_region(self->io.fir_base, self->io.fir_ext);
+
+ if (self->tx_buff.head)
+ dma_free_coherent(NULL, self->tx_buff.truesize,
+ self->tx_buff.head, self->tx_buff_dma);
+
+ if (self->rx_buff.head)
+ dma_free_coherent(NULL, self->rx_buff.truesize,
+ self->rx_buff.head, self->rx_buff_dma);
+
+ free_netdev(self->netdev);
+
+ return 0;
+}
+
+static int w83977af_probe(int iobase, int irq, int dma)
+{
+ int version;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+#ifdef CONFIG_USE_W977_PNP
+ /* Enter PnP configuration mode */
+ w977_efm_enter(efbase[i]);
+
+ w977_select_device(W977_DEVICE_IR, efbase[i]);
+
+ /* Configure PnP port, IRQ, and DMA channel */
+ w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
+ w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);
+
+ w977_write_reg(0x70, irq, efbase[i]);
+#ifdef CONFIG_ARCH_NETWINDER
+ /* Netwinder uses 1 higher than Linux */
+ w977_write_reg(0x74, dma + 1, efbase[i]);
+#else
+ w977_write_reg(0x74, dma, efbase[i]);
+#endif /* CONFIG_ARCH_NETWINDER */
+ w977_write_reg(0x75, 0x04, efbase[i]);/* Disable Tx DMA */
+
+ /* Set append hardware CRC, enable IR bank selection */
+ w977_write_reg(0xf0, APEDCRC | ENBNKSEL, efbase[i]);
+
+ /* Activate device */
+ w977_write_reg(0x30, 0x01, efbase[i]);
+
+ w977_efm_exit(efbase[i]);
+#endif /* CONFIG_USE_W977_PNP */
+ /* Disable Advanced mode */
+ switch_bank(iobase, SET2);
+		outb(0x00, iobase + 2);
+
+ /* Turn on UART (global) interrupts */
+ switch_bank(iobase, SET0);
+ outb(HCR_EN_IRQ, iobase + HCR);
+
+ /* Switch to advanced mode */
+ switch_bank(iobase, SET2);
+ outb(inb(iobase + ADCR1) | ADCR1_ADV_SL, iobase + ADCR1);
+
+ /* Set default IR-mode */
+ switch_bank(iobase, SET0);
+ outb(HCR_SIR, iobase + HCR);
+
+ /* Read the Advanced IR ID */
+ switch_bank(iobase, SET3);
+ version = inb(iobase + AUID);
+
+ /* Should be 0x1? */
+ if (0x10 == (version & 0xf0)) {
+ efio = efbase[i];
+
+ /* Set FIFO size to 32 */
+ switch_bank(iobase, SET2);
+ outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);
+
+ /* Set FIFO threshold to TX17, RX16 */
+ switch_bank(iobase, SET0);
+ outb(UFR_RXTL | UFR_TXTL | UFR_TXF_RST | UFR_RXF_RST |
+ UFR_EN_FIFO, iobase + UFR);
+
+ /* Receiver frame length */
+ switch_bank(iobase, SET4);
+ outb(2048 & 0xff, iobase + 6);
+ outb((2048 >> 8) & 0x1f, iobase + 7);
+
+ /*
+ * Init HP HSDL-1100 transceiver.
+ *
+			 * Set IRX_MSL since we have 2 receive paths, IRRX
+			 * and IRRXH. Clear IRSL0D since we want IRSL0 to
+			 * be an input pin used for IRRXH
+ *
+ * IRRX pin 37 connected to receiver
+ * IRTX pin 38 connected to transmitter
+ * FIRRX pin 39 connected to receiver (IRSL0)
+ * CIRRX pin 40 connected to pin 37
+ */
+ switch_bank(iobase, SET7);
+ outb(0x40, iobase + 7);
+
+ net_info_ratelimited("W83977AF (IR) driver loaded. Version: 0x%02x\n",
+ version);
+
+ return 0;
+ } else {
+			/* Try next extended function register address */
+ pr_debug("%s: Wrong chip version\n", __func__);
+ }
+ }
+ return -1;
+}
+
+static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
+{
+ int ir_mode = HCR_SIR;
+ int iobase;
+ __u8 set;
+
+ iobase = self->io.fir_base;
+
+ /* Update accounting for new speed */
+ self->io.speed = speed;
+
+ /* Save current bank */
+ set = inb(iobase + SSR);
+
+ /* Disable interrupts */
+ switch_bank(iobase, SET0);
+ outb(0, iobase + ICR);
+
+ /* Select Set 2 */
+ switch_bank(iobase, SET2);
+ outb(0x00, iobase + ABHL);
+
+ switch (speed) {
+ case 9600: outb(0x0c, iobase + ABLL); break;
+ case 19200: outb(0x06, iobase + ABLL); break;
+ case 38400: outb(0x03, iobase + ABLL); break;
+ case 57600: outb(0x02, iobase + ABLL); break;
+ case 115200: outb(0x01, iobase + ABLL); break;
+ case 576000:
+ ir_mode = HCR_MIR_576;
+ pr_debug("%s: handling baud of 576000\n", __func__);
+ break;
+ case 1152000:
+ ir_mode = HCR_MIR_1152;
+ pr_debug("%s: handling baud of 1152000\n", __func__);
+ break;
+ case 4000000:
+ ir_mode = HCR_FIR;
+ pr_debug("%s: handling baud of 4000000\n", __func__);
+ break;
+ default:
+ ir_mode = HCR_FIR;
+ pr_debug("%s: unknown baud rate of %d\n", __func__, speed);
+ break;
+ }
+
+ /* Set speed mode */
+ switch_bank(iobase, SET0);
+ outb(ir_mode, iobase + HCR);
+
+ /* set FIFO size to 32 */
+ switch_bank(iobase, SET2);
+ outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);
+
+ /* set FIFO threshold to TX17, RX16 */
+ switch_bank(iobase, SET0);
+ outb(0x00, iobase + UFR); /* Reset */
+ outb(UFR_EN_FIFO, iobase + UFR); /* First we must enable FIFO */
+ outb(0xa7, iobase + UFR);
+
+ netif_wake_queue(self->netdev);
+
+ /* Enable some interrupts so we can receive frames */
+ switch_bank(iobase, SET0);
+ if (speed > PIO_MAX_SPEED) {
+ outb(ICR_EFSFI, iobase + ICR);
+ w83977af_dma_receive(self);
+ } else {
+ outb(ICR_ERBRI, iobase + ICR);
+ }
+
+ /* Restore SSR */
+ outb(set, iobase + SSR);
+}
+
+/*
+ * Function w83977af_hard_xmit (skb, dev)
+ *
+ * Sets up a DMA transfer to send the current frame.
+ *
+ */
+static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct w83977af_ir *self;
+ __s32 speed;
+ int iobase;
+ __u8 set;
+ int mtt;
+
+ self = netdev_priv(dev);
+
+ iobase = self->io.fir_base;
+
+ pr_debug("%s: %ld, skb->len=%d\n", __func__, jiffies, (int)skb->len);
+
+ /* Lock transmit buffer */
+ netif_stop_queue(dev);
+
+ /* Check if we need to change the speed */
+ speed = irda_get_next_speed(skb);
+ if ((speed != self->io.speed) && (speed != -1)) {
+ /* Check for empty frame */
+ if (!skb->len) {
+ w83977af_change_speed(self, speed);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ self->new_speed = speed;
+ }
+
+ /* Save current set */
+ set = inb(iobase + SSR);
+
+ /* Decide if we should use PIO or DMA transfer */
+ if (self->io.speed > PIO_MAX_SPEED) {
+ self->tx_buff.data = self->tx_buff.head;
+ skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
+ self->tx_buff.len = skb->len;
+
+ mtt = irda_get_mtt(skb);
+ pr_debug("%s: %ld, mtt=%d\n", __func__, jiffies, mtt);
+ if (mtt > 1000)
+ mdelay(mtt / 1000);
+ else if (mtt)
+ udelay(mtt);
+
+ /* Enable DMA interrupt */
+ switch_bank(iobase, SET0);
+ outb(ICR_EDMAI, iobase + ICR);
+ w83977af_dma_write(self, iobase);
+ } else {
+ self->tx_buff.data = self->tx_buff.head;
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ /* Add interrupt on tx low level (will fire immediately) */
+ switch_bank(iobase, SET0);
+ outb(ICR_ETXTHI, iobase + ICR);
+ }
+ dev_kfree_skb(skb);
+
+ /* Restore set register */
+ outb(set, iobase + SSR);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Function w83977af_dma_write (self, iobase)
+ *
+ * Send frame using DMA
+ *
+ */
+static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
+{
+ __u8 set;
+
+ pr_debug("%s: len=%d\n", __func__, self->tx_buff.len);
+
+ /* Save current set */
+ set = inb(iobase + SSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, SET0);
+ outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
+
+ /* Choose transmit DMA channel */
+ switch_bank(iobase, SET2);
+ outb(ADCR1_D_CHSW | /*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase + ADCR1);
+ irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
+ DMA_MODE_WRITE);
+ self->io.direction = IO_XMIT;
+
+ /* Enable DMA */
+ switch_bank(iobase, SET0);
+ outb(inb(iobase + HCR) | HCR_EN_DMA | HCR_TX_WT, iobase + HCR);
+
+ /* Restore set register */
+ outb(set, iobase + SSR);
+}
+
+/*
+ * Function w83977af_pio_write (iobase, buf, len, fifo_size)
+ *
+ *    Fill the transmit FIFO with frame data using PIO and return the
+ *    number of bytes actually written
+ *
+ */
+static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
+{
+ int actual = 0;
+ __u8 set;
+
+ /* Save current bank */
+ set = inb(iobase + SSR);
+
+ switch_bank(iobase, SET0);
+ if (!(inb_p(iobase + USR) & USR_TSRE)) {
+ pr_debug("%s: warning, FIFO not empty yet!\n", __func__);
+
+ fifo_size -= 17;
+ pr_debug("%s: %d bytes left in tx fifo\n", __func__, fifo_size);
+ }
+
+ /* Fill FIFO with current frame */
+ while ((fifo_size-- > 0) && (actual < len)) {
+ /* Transmit next byte */
+ outb(buf[actual++], iobase + TBR);
+ }
+
+ pr_debug("%s: fifo_size %d ; %d sent of %d\n",
+ __func__, fifo_size, actual, len);
+
+ /* Restore bank */
+ outb(set, iobase + SSR);
+
+ return actual;
+}
+
+/*
+ * Function w83977af_dma_xmit_complete (self)
+ *
+ * The transfer of a frame is finished, so do the necessary post-processing
+ *
+ *
+ */
+static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
+{
+ int iobase;
+ __u8 set;
+
+ pr_debug("%s: %ld\n", __func__, jiffies);
+
+ IRDA_ASSERT(self, return;);
+
+ iobase = self->io.fir_base;
+
+ /* Save current set */
+ set = inb(iobase + SSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, SET0);
+ outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
+
+ /* Check for underrun! */
+ if (inb(iobase + AUDR) & AUDR_UNDR) {
+ pr_debug("%s: Transmit underrun!\n", __func__);
+
+ self->netdev->stats.tx_errors++;
+ self->netdev->stats.tx_fifo_errors++;
+
+ /* Clear bit, by writing 1 to it */
+ outb(AUDR_UNDR, iobase + AUDR);
+ } else {
+ self->netdev->stats.tx_packets++;
+ }
+
+ if (self->new_speed) {
+ w83977af_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ }
+
+ /* Unlock tx_buff and request another frame */
+ /* Tell the network layer, that we want more frames */
+ netif_wake_queue(self->netdev);
+
+ /* Restore set */
+ outb(set, iobase + SSR);
+}
+
+/*
+ * Function w83977af_dma_receive (self)
+ *
+ * Get ready for receiving a frame. The device will initiate a DMA
+ * if it starts to receive a frame.
+ *
+ */
+static int w83977af_dma_receive(struct w83977af_ir *self)
+{
+ int iobase;
+ __u8 set;
+#ifdef CONFIG_ARCH_NETWINDER
+ unsigned long flags;
+ __u8 hcr;
+#endif
+ IRDA_ASSERT(self, return -1;);
+
+ pr_debug("%s\n", __func__);
+
+ iobase = self->io.fir_base;
+
+ /* Save current set */
+ set = inb(iobase + SSR);
+
+ /* Disable DMA */
+ switch_bank(iobase, SET0);
+ outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
+
+ /* Choose DMA Rx, DMA Fairness, and Advanced mode */
+ switch_bank(iobase, SET2);
+ outb((inb(iobase + ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/ | ADCR1_ADV_SL,
+ iobase + ADCR1);
+
+ self->io.direction = IO_RECV;
+ self->rx_buff.data = self->rx_buff.head;
+
+#ifdef CONFIG_ARCH_NETWINDER
+ spin_lock_irqsave(&self->lock, flags);
+
+ disable_dma(self->io.dma);
+ clear_dma_ff(self->io.dma);
+ set_dma_mode(self->io.dma, DMA_MODE_READ);
+ set_dma_addr(self->io.dma, self->rx_buff_dma);
+ set_dma_count(self->io.dma, self->rx_buff.truesize);
+#else
+ irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
+ DMA_MODE_READ);
+#endif
+ /*
+ * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
+ * important that we don't reset the Tx FIFO since it might not
+ * be finished transmitting yet
+ */
+ switch_bank(iobase, SET0);
+ outb(UFR_RXTL | UFR_TXTL | UFR_RXF_RST | UFR_EN_FIFO, iobase + UFR);
+ self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;
+
+ /* Enable DMA */
+ switch_bank(iobase, SET0);
+#ifdef CONFIG_ARCH_NETWINDER
+ hcr = inb(iobase + HCR);
+ outb(hcr | HCR_EN_DMA, iobase + HCR);
+ enable_dma(self->io.dma);
+ spin_unlock_irqrestore(&self->lock, flags);
+#else
+ outb(inb(iobase + HCR) | HCR_EN_DMA, iobase + HCR);
+#endif
+ /* Restore set */
+ outb(set, iobase + SSR);
+
+ return 0;
+}
+
+/*
+ * Function w83977af_dma_receive_complete (self)
+ *
+ * Finished with receiving a frame
+ *
+ */
+static int w83977af_dma_receive_complete(struct w83977af_ir *self)
+{
+ struct sk_buff *skb;
+ struct st_fifo *st_fifo;
+ int len;
+ int iobase;
+ __u8 set;
+ __u8 status;
+
+ pr_debug("%s\n", __func__);
+
+ st_fifo = &self->st_fifo;
+
+ iobase = self->io.fir_base;
+
+ /* Save current set */
+ set = inb(iobase + SSR);
+
+ iobase = self->io.fir_base;
+
+ /* Read status FIFO */
+ switch_bank(iobase, SET5);
+ while ((status = inb(iobase + FS_FO)) & FS_FO_FSFDR) {
+ st_fifo->entries[st_fifo->tail].status = status;
+
+ st_fifo->entries[st_fifo->tail].len = inb(iobase + RFLFL);
+ st_fifo->entries[st_fifo->tail].len |= inb(iobase + RFLFH) << 8;
+
+ st_fifo->tail++;
+ st_fifo->len++;
+ }
+
+ while (st_fifo->len) {
+ /* Get first entry */
+ status = st_fifo->entries[st_fifo->head].status;
+ len = st_fifo->entries[st_fifo->head].len;
+ st_fifo->head++;
+ st_fifo->len--;
+
+ /* Check for errors */
+ if (status & FS_FO_ERR_MSK) {
+ if (status & FS_FO_LST_FR) {
+ /* Add number of lost frames to stats */
+ self->netdev->stats.rx_errors += len;
+ } else {
+ /* Skip frame */
+ self->netdev->stats.rx_errors++;
+
+ self->rx_buff.data += len;
+
+ if (status & FS_FO_MX_LEX)
+ self->netdev->stats.rx_length_errors++;
+
+ if (status & FS_FO_PHY_ERR)
+ self->netdev->stats.rx_frame_errors++;
+
+ if (status & FS_FO_CRC_ERR)
+ self->netdev->stats.rx_crc_errors++;
+ }
+ /* The errors below can be reported in both cases */
+ if (status & FS_FO_RX_OV)
+ self->netdev->stats.rx_fifo_errors++;
+
+ if (status & FS_FO_FSF_OV)
+ self->netdev->stats.rx_fifo_errors++;
+
+ } else {
+ /* Check if we have transferred all data to memory */
+ switch_bank(iobase, SET0);
+ if (inb(iobase + USR) & USR_RDR)
+ udelay(80); /* Should be enough!? */
+
+ skb = dev_alloc_skb(len + 1);
+ if (!skb) {
+ pr_info("%s: memory squeeze, dropping frame\n",
+ __func__);
+ /* Restore set register */
+ outb(set, iobase + SSR);
+
+ return FALSE;
+ }
+
+ /* Align to 20 bytes */
+ skb_reserve(skb, 1);
+
+ /* Copy frame without CRC */
+ if (self->io.speed < 4000000) {
+ skb_put(skb, len - 2);
+ skb_copy_to_linear_data(skb,
+ self->rx_buff.data,
+ len - 2);
+ } else {
+ skb_put(skb, len - 4);
+ skb_copy_to_linear_data(skb,
+ self->rx_buff.data,
+ len - 4);
+ }
+
+ /* Move to next frame */
+ self->rx_buff.data += len;
+ self->netdev->stats.rx_packets++;
+
+ skb->dev = self->netdev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ }
+ }
+ /* Restore set register */
+ outb(set, iobase + SSR);
+
+ return TRUE;
+}
+
+/*
+ * Function w83977af_pio_receive (self)
+ *
+ * Receive all data in receiver FIFO
+ *
+ */
+static void w83977af_pio_receive(struct w83977af_ir *self)
+{
+ __u8 byte = 0x00;
+ int iobase;
+
+ IRDA_ASSERT(self, return;);
+
+ iobase = self->io.fir_base;
+
+ /* Receive all characters in Rx FIFO */
+ do {
+ byte = inb(iobase + RBR);
+ async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
+ byte);
+ } while (inb(iobase + USR) & USR_RDR); /* Data available */
+}
+
+/*
+ * Function w83977af_sir_interrupt (self, isr)
+ *
+ * Handle SIR interrupt
+ *
+ */
+static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
+{
+ int actual;
+ __u8 new_icr = 0;
+ __u8 set;
+ int iobase;
+
+ pr_debug("%s: isr=%#x\n", __func__, isr);
+
+ iobase = self->io.fir_base;
+ /* Transmit FIFO low on data */
+ if (isr & ISR_TXTH_I) {
+ /* Write data left in transmit buffer */
+ actual = w83977af_pio_write(self->io.fir_base,
+ self->tx_buff.data,
+ self->tx_buff.len,
+ self->io.fifo_size);
+
+ self->tx_buff.data += actual;
+ self->tx_buff.len -= actual;
+
+ self->io.direction = IO_XMIT;
+
+ /* Check if finished */
+ if (self->tx_buff.len > 0) {
+ new_icr |= ICR_ETXTHI;
+ } else {
+ set = inb(iobase + SSR);
+ switch_bank(iobase, SET0);
+ outb(AUDR_SFEND, iobase + AUDR);
+ outb(set, iobase + SSR);
+
+ self->netdev->stats.tx_packets++;
+
+ /* Feed me more packets */
+ netif_wake_queue(self->netdev);
+ new_icr |= ICR_ETBREI;
+ }
+ }
+ /* Check if transmission has completed */
+ if (isr & ISR_TXEMP_I) {
+ /* Check if we need to change the speed? */
+ if (self->new_speed) {
+ pr_debug("%s: Changing speed!\n", __func__);
+ w83977af_change_speed(self, self->new_speed);
+ self->new_speed = 0;
+ }
+
+ /* Turn around and get ready to receive some data */
+ self->io.direction = IO_RECV;
+ new_icr |= ICR_ERBRI;
+ }
+
+ /* Rx FIFO threshold or timeout */
+ if (isr & ISR_RXTH_I) {
+ w83977af_pio_receive(self);
+
+ /* Keep receiving */
+ new_icr |= ICR_ERBRI;
+ }
+ return new_icr;
+}
+
+/*
+ * Function w83977af_fir_interrupt (self, isr)
+ *
+ * Handle MIR/FIR interrupt
+ *
+ */
+static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
+{
+ __u8 new_icr = 0;
+ __u8 set;
+ int iobase;
+
+ iobase = self->io.fir_base;
+ set = inb(iobase + SSR);
+
+ /* End of frame detected in FIFO */
+ if (isr & (ISR_FEND_I | ISR_FSF_I)) {
+ if (w83977af_dma_receive_complete(self)) {
+ /* Wait for next status FIFO interrupt */
+ new_icr |= ICR_EFSFI;
+ } else {
+ /* DMA not finished yet */
+
+ /* Set timer value, resolution 1 ms */
+ switch_bank(iobase, SET4);
+ outb(0x01, iobase + TMRL); /* 1 ms */
+ outb(0x00, iobase + TMRH);
+
+ /* Start timer */
+ outb(IR_MSL_EN_TMR, iobase + IR_MSL);
+
+ new_icr |= ICR_ETMRI;
+ }
+ }
+ /* Timer finished */
+ if (isr & ISR_TMR_I) {
+ /* Disable timer */
+ switch_bank(iobase, SET4);
+ outb(0, iobase + IR_MSL);
+
+ /* Clear timer event */
+ /* switch_bank(iobase, SET0); */
+/* outb(ASCR_CTE, iobase+ASCR); */
+
+ /* Check if this is a TX timer interrupt */
+ if (self->io.direction == IO_XMIT) {
+ w83977af_dma_write(self, iobase);
+
+ new_icr |= ICR_EDMAI;
+ } else {
+ /* Check if DMA has now finished */
+ w83977af_dma_receive_complete(self);
+
+ new_icr |= ICR_EFSFI;
+ }
+ }
+ /* Finished with DMA */
+ if (isr & ISR_DMA_I) {
+ w83977af_dma_xmit_complete(self);
+
+ /* Check if there are more frames to be transmitted */
+ /* if (irda_device_txqueue_empty(self)) { */
+
+ /* Prepare for receive
+ *
+ * ** Netwinder Tx DMA likes that we do this anyway **
+ */
+ w83977af_dma_receive(self);
+ new_icr = ICR_EFSFI;
+ /* } */
+ }
+
+ /* Restore set */
+ outb(set, iobase + SSR);
+
+ return new_icr;
+}
+
+/*
+ * Function w83977af_interrupt (irq, dev_id, regs)
+ *
+ * An interrupt from the chip has arrived. Time to do some work
+ *
+ */
+static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct w83977af_ir *self;
+ __u8 set, icr, isr;
+ int iobase;
+
+ self = netdev_priv(dev);
+
+ iobase = self->io.fir_base;
+
+ /* Save current bank */
+ set = inb(iobase + SSR);
+ switch_bank(iobase, SET0);
+
+ icr = inb(iobase + ICR);
+ isr = inb(iobase + ISR) & icr; /* Mask out the interesting ones */
+
+ outb(0, iobase + ICR); /* Disable interrupts */
+
+ if (isr) {
+ /* Dispatch interrupt handler for the current speed */
+ if (self->io.speed > PIO_MAX_SPEED)
+ icr = w83977af_fir_interrupt(self, isr);
+ else
+ icr = w83977af_sir_interrupt(self, isr);
+ }
+
+ outb(icr, iobase + ICR); /* Restore (new) interrupts */
+ outb(set, iobase + SSR); /* Restore bank register */
+ return IRQ_RETVAL(isr);
+}
+
+/*
+ * Function w83977af_is_receiving (self)
+ *
+ * Return TRUE if we are currently receiving a frame
+ *
+ */
+static int w83977af_is_receiving(struct w83977af_ir *self)
+{
+ int status = FALSE;
+ int iobase;
+ __u8 set;
+
+ IRDA_ASSERT(self, return FALSE;);
+
+ if (self->io.speed > 115200) {
+ iobase = self->io.fir_base;
+
+ /* Check if rx FIFO is not empty */
+ set = inb(iobase + SSR);
+ switch_bank(iobase, SET2);
+ if ((inb(iobase + RXFDTH) & 0x3f) != 0) {
+ /* We are receiving something */
+ status = TRUE;
+ }
+ outb(set, iobase + SSR);
+ } else {
+ status = (self->rx_buff.state != OUTSIDE_FRAME);
+ }
+
+ return status;
+}
+
+/*
+ * Function w83977af_net_open (dev)
+ *
+ * Start the device
+ *
+ */
+static int w83977af_net_open(struct net_device *dev)
+{
+ struct w83977af_ir *self;
+ int iobase;
+ char hwname[32];
+ __u8 set;
+
+ IRDA_ASSERT(dev, return -1;);
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self, return 0;);
+
+ iobase = self->io.fir_base;
+
+ if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
+ (void *)dev)) {
+ return -EAGAIN;
+ }
+ /*
+ * Always allocate the DMA channel after the IRQ,
+ * and clean up on failure.
+ */
+ if (request_dma(self->io.dma, dev->name)) {
+ free_irq(self->io.irq, dev);
+ return -EAGAIN;
+ }
+
+ /* Save current set */
+ set = inb(iobase + SSR);
+
+ /* Enable some interrupts so we can receive frames again */
+ switch_bank(iobase, SET0);
+ if (self->io.speed > 115200) {
+ outb(ICR_EFSFI, iobase + ICR);
+ w83977af_dma_receive(self);
+ } else {
+ outb(ICR_ERBRI, iobase + ICR);
+ }
+
+ /* Restore bank register */
+ outb(set, iobase + SSR);
+
+ /* Ready to play! */
+ netif_start_queue(dev);
+
+ /* Give self a hardware name */
+ sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);
+
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open(dev, &self->qos, hwname);
+
+ return 0;
+}
+
+/*
+ * Function w83977af_net_close (dev)
+ *
+ * Stop the device
+ *
+ */
+static int w83977af_net_close(struct net_device *dev)
+{
+ struct w83977af_ir *self;
+ int iobase;
+ __u8 set;
+
+ IRDA_ASSERT(dev, return -1;);
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self, return 0;);
+
+ iobase = self->io.fir_base;
+
+ /* Stop device */
+ netif_stop_queue(dev);
+
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+
+ disable_dma(self->io.dma);
+
+ /* Save current set */
+ set = inb(iobase + SSR);
+
+ /* Disable interrupts */
+ switch_bank(iobase, SET0);
+ outb(0, iobase + ICR);
+
+ free_irq(self->io.irq, dev);
+ free_dma(self->io.dma);
+
+ /* Restore bank register */
+ outb(set, iobase + SSR);
+
+ return 0;
+}
+
+/*
+ * Function w83977af_net_ioctl (dev, rq, cmd)
+ *
+ * Process IOCTL commands for this device
+ *
+ */
+static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct if_irda_req *irq = (struct if_irda_req *)rq;
+ struct w83977af_ir *self;
+ unsigned long flags;
+ int ret = 0;
+
+ IRDA_ASSERT(dev, return -1;);
+
+ self = netdev_priv(dev);
+
+ IRDA_ASSERT(self, return -1;);
+
+ pr_debug("%s: %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
+
+ spin_lock_irqsave(&self->lock, flags);
+
+ switch (cmd) {
+ case SIOCSBANDWIDTH: /* Set bandwidth */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto out;
+ }
+ w83977af_change_speed(self, irq->ifr_baudrate);
+ break;
+ case SIOCSMEDIABUSY: /* Set media busy */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto out;
+ }
+ irda_device_set_media_busy(self->netdev, TRUE);
+ break;
+ case SIOCGRECEIVING: /* Check if we are receiving right now */
+ irq->ifr_receiving = w83977af_is_receiving(self);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+out:
+ spin_unlock_irqrestore(&self->lock, flags);
+ return ret;
+}
+
+MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
+MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
+MODULE_LICENSE("GPL");
+
+module_param(qos_mtt_bits, int, 0);
+MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
+module_param_hw_array(io, int, ioport, NULL, 0);
+MODULE_PARM_DESC(io, "Base I/O addresses");
+module_param_hw_array(irq, int, irq, NULL, 0);
+MODULE_PARM_DESC(irq, "IRQ lines");
+
+/*
+ * Function init_module (void)
+ *
+ *
+ *
+ */
+module_init(w83977af_init);
+
+/*
+ * Function cleanup_module (void)
+ *
+ *
+ *
+ */
+module_exit(w83977af_cleanup);
diff --git a/drivers/staging/irda/drivers/w83977af_ir.h b/drivers/staging/irda/drivers/w83977af_ir.h
new file mode 100644
index 000000000000..fefe9b11e200
--- /dev/null
+++ b/drivers/staging/irda/drivers/w83977af_ir.h
@@ -0,0 +1,198 @@
+/*********************************************************************
+ *
+ * Filename: w83977af_ir.h
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Paul VanderSpek
+ * Created at: Thu Nov 19 13:55:34 1998
+ * Modified at: Tue Jan 11 13:08:19 2000
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#ifndef W83977AF_IR_H
+#define W83977AF_IR_H
+
+#include <asm/io.h>
+#include <linux/types.h>
+
+/* Flags for configuration register CRF0 */
+#define ENBNKSEL 0x01
+#define APEDCRC 0x02
+#define TXW4C 0x04
+#define RXW4C 0x08
+
+/* Bank 0 */
+#define RBR 0x00 /* Receiver buffer register */
+#define TBR 0x00 /* Transmitter buffer register */
+
+#define ICR 0x01 /* Interrupt configuration register */
+#define ICR_ERBRI 0x01 /* Receiver buffer register interrupt */
+#define ICR_ETBREI      0x02 /* Transmitter buffer register empty interrupt */
+#define ICR_EUSRI       0x04 /* IR status interrupt */
+#define ICR_EHSRI 0x04
+#define ICR_ETXURI 0x04 /* Tx underrun */
+#define ICR_EDMAI 0x10 /* DMA interrupt */
+#define ICR_ETXTHI 0x20 /* Transmitter threshold interrupt */
+#define ICR_EFSFI 0x40 /* Frame status FIFO interrupt */
+#define ICR_ETMRI 0x80 /* Timer interrupt */
+
+#define UFR 0x02 /* FIFO control register */
+#define UFR_EN_FIFO 0x01 /* Enable FIFO's */
+#define UFR_RXF_RST 0x02 /* Reset Rx FIFO */
+#define UFR_TXF_RST 0x04 /* Reset Tx FIFO */
+#define UFR_RXTL 0x80 /* Rx FIFO threshold (set to 16) */
+#define UFR_TXTL 0x20 /* Tx FIFO threshold (set to 17) */
+
+#define ISR 0x02 /* Interrupt status register */
+#define ISR_RXTH_I 0x01 /* Receive threshold interrupt */
+#define ISR_TXEMP_I 0x02 /* Transmitter empty interrupt */
+#define ISR_FEND_I 0x04
+#define ISR_DMA_I 0x10
+#define ISR_TXTH_I 0x20 /* Transmitter threshold interrupt */
+#define ISR_FSF_I 0x40
+#define ISR_TMR_I 0x80 /* Timer interrupt */
+
+#define UCR 0x03 /* Uart control register */
+#define UCR_DLS8 0x03 /* 8N1 */
+
+#define SSR 0x03 /* Sets select register */
+#define SET0 UCR_DLS8 /* Make sure we keep 8N1 */
+#define SET1 (0x80|UCR_DLS8) /* Make sure we keep 8N1 */
+#define SET2 0xE0
+#define SET3 0xE4
+#define SET4 0xE8
+#define SET5 0xEC
+#define SET6 0xF0
+#define SET7 0xF4
+
+#define HCR 0x04
+#define HCR_MODE_MASK ~(0xD0)
+#define HCR_SIR 0x60
+#define HCR_MIR_576 0x20
+#define HCR_MIR_1152 0x80
+#define HCR_FIR 0xA0
+#define HCR_EN_DMA 0x04
+#define HCR_EN_IRQ 0x08
+#define HCR_TX_WT 0x08
+
+#define USR 0x05 /* IR status register */
+#define USR_RDR 0x01 /* Receive data ready */
+#define USR_TSRE 0x40 /* Transmitter empty? */
+
+#define AUDR 0x07
+#define AUDR_SFEND 0x08 /* Set a frame end */
+#define AUDR_RXBSY 0x20 /* Rx busy */
+#define AUDR_UNDR       0x40 /* Transmitter underrun */
+
+/* Set 2 */
+#define ABLL 0x00 /* Advanced baud rate divisor latch (low byte) */
+#define ABHL 0x01 /* Advanced baud rate divisor latch (high byte) */
+
+#define ADCR1 0x02
+#define ADCR1_ADV_SL 0x01
+#define ADCR1_D_CHSW	0x08 /* the specs are wrong; it's bit 3, not 4 */
+#define ADCR1_DMA_F 0x02
+
+#define ADCR2 0x04
+#define ADCR2_TXFS32 0x01
+#define ADCR2_RXFS32 0x04
+
+#define RXFDTH 0x07
+
+/* Set 3 */
+#define AUID 0x00
+
+/* Set 4 */
+#define TMRL 0x00 /* Timer value register (low byte) */
+#define TMRH 0x01 /* Timer value register (high byte) */
+
+#define IR_MSL 0x02 /* Infrared mode select */
+#define IR_MSL_EN_TMR 0x01 /* Enable timer */
+
+#define TFRLL 0x04 /* Transmitter frame length (low byte) */
+#define TFRLH 0x05 /* Transmitter frame length (high byte) */
+#define RFRLL 0x06 /* Receiver frame length (low byte) */
+#define RFRLH 0x07 /* Receiver frame length (high byte) */
+
+/* Set 5 */
+
+#define FS_FO 0x05 /* Frame status FIFO */
+#define FS_FO_FSFDR 0x80 /* Frame status FIFO data ready */
+#define FS_FO_LST_FR 0x40 /* Frame lost */
+#define FS_FO_MX_LEX 0x10 /* Max frame len exceeded */
+#define FS_FO_PHY_ERR 0x08 /* Physical layer error */
+#define FS_FO_CRC_ERR 0x04
+#define FS_FO_RX_OV 0x02 /* Receive overrun */
+#define FS_FO_FSF_OV 0x01 /* Frame status FIFO overrun */
+#define FS_FO_ERR_MSK 0x5f /* Error mask */
+
+#define RFLFL 0x06
+#define RFLFH 0x07
+
+/* Set 6 */
+#define IR_CFG2 0x00
+#define IR_CFG2_DIS_CRC 0x02
+
+/* Set 7 */
+#define IRM_CR 0x07 /* Infrared module control register */
+#define IRM_CR_IRX_MSL 0x40
+#define IRM_CR_AF_MNT 0x80 /* Automatic format */
+
+/* For storing entries in the status FIFO */
+struct st_fifo_entry {
+ int status;
+ int len;
+};
+
+struct st_fifo {
+ struct st_fifo_entry entries[10];
+ int head;
+ int tail;
+ int len;
+};
+
+/* Private data for each instance */
+struct w83977af_ir {
+ struct st_fifo st_fifo;
+
+ int tx_buff_offsets[10]; /* Offsets between frames in tx_buff */
+ int tx_len; /* Number of frames in tx_buff */
+
+ struct net_device *netdev; /* Yes! we are some kind of netdevice */
+
+	struct irlap_cb *irlap;   /* The link layer we are bound to */
+ struct qos_info qos; /* QoS capabilities for this device */
+
+ chipio_t io; /* IrDA controller information */
+ iobuff_t tx_buff; /* Transmit buffer */
+ iobuff_t rx_buff; /* Receive buffer */
+ dma_addr_t tx_buff_dma;
+ dma_addr_t rx_buff_dma;
+
+ /* Note : currently locking is *very* incomplete, but this
+ * will get you started. Check in nsc-ircc.c for a proper
+ * locking strategy. - Jean II */
+ spinlock_t lock; /* For serializing operations */
+
+ __u32 new_speed;
+};
+
+static inline void switch_bank(int iobase, int set)
+{
+	outb(set, iobase + SSR);
+}
+
+#endif